2024-12-09 17:21:00,115 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-09 17:21:00,130 main DEBUG Took 0.012485 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 17:21:00,131 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 17:21:00,131 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 17:21:00,132 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 17:21:00,133 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,142 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 17:21:00,154 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,155 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,156 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,156 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,156 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,157 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,157 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,158 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,158 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,158 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,159 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,159 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,159 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,160 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 17:21:00,160 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,160 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,161 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,161 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,161 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,161 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,162 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,162 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,162 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,163 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 17:21:00,163 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,163 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 17:21:00,164 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 17:21:00,166 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 17:21:00,167 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 17:21:00,168 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 17:21:00,169 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 17:21:00,169 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 17:21:00,176 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 17:21:00,179 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 17:21:00,180 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 17:21:00,181 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 17:21:00,181 main DEBUG createAppenders(={Console}) 2024-12-09 17:21:00,181 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-09 17:21:00,182 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-09 17:21:00,182 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-09 17:21:00,182 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 17:21:00,183 main DEBUG OutputStream closed 2024-12-09 17:21:00,183 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 17:21:00,183 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 17:21:00,183 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-09 17:21:00,245 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 17:21:00,247 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 17:21:00,248 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 17:21:00,249 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 17:21:00,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 17:21:00,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 17:21:00,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 17:21:00,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 17:21:00,250 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 17:21:00,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 17:21:00,251 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 17:21:00,251 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 17:21:00,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 17:21:00,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 17:21:00,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 17:21:00,252 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 17:21:00,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 17:21:00,253 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 17:21:00,255 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 17:21:00,255 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-09 17:21:00,256 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 17:21:00,256 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-09T17:21:00,468 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50 2024-12-09 17:21:00,471 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 17:21:00,471 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T17:21:00,479 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-09T17:21:00,497 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T17:21:00,500 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b, deleteOnExit=true 2024-12-09T17:21:00,501 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-09T17:21:00,502 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/test.cache.data in system properties and HBase conf 2024-12-09T17:21:00,502 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T17:21:00,503 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.log.dir in system properties and HBase conf 2024-12-09T17:21:00,504 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T17:21:00,505 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T17:21:00,505 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-09T17:21:00,588 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T17:21:00,667 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T17:21:00,670 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T17:21:00,670 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T17:21:00,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T17:21:00,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T17:21:00,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T17:21:00,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T17:21:00,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T17:21:00,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T17:21:00,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T17:21:00,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/nfs.dump.dir in system properties and HBase conf 2024-12-09T17:21:00,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/java.io.tmpdir in system properties and HBase conf 2024-12-09T17:21:00,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T17:21:00,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T17:21:00,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T17:21:01,570 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T17:21:01,635 INFO [Time-limited test {}] log.Log(170): Logging initialized @2093ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T17:21:01,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T17:21:01,758 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T17:21:01,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T17:21:01,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T17:21:01,778 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T17:21:01,790 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T17:21:01,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.log.dir/,AVAILABLE} 2024-12-09T17:21:01,794 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T17:21:01,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0d4558{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/java.io.tmpdir/jetty-localhost-37435-hadoop-hdfs-3_4_1-tests_jar-_-any-8190204735082107523/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T17:21:01,965 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:37435} 2024-12-09T17:21:01,966 INFO [Time-limited test {}] server.Server(415): Started @2424ms 2024-12-09T17:21:02,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T17:21:02,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T17:21:02,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T17:21:02,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T17:21:02,423 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T17:21:02,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.log.dir/,AVAILABLE} 2024-12-09T17:21:02,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T17:21:02,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bd2e890{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/java.io.tmpdir/jetty-localhost-35691-hadoop-hdfs-3_4_1-tests_jar-_-any-15142602455075330541/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T17:21:02,521 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:35691} 2024-12-09T17:21:02,521 INFO [Time-limited test {}] server.Server(415): Started @2979ms 2024-12-09T17:21:02,570 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T17:21:03,769 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data2/current/BP-1064377711-172.17.0.2-1733764861151/current, will proceed with Du for space computation calculation, 2024-12-09T17:21:03,769 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data1/current/BP-1064377711-172.17.0.2-1733764861151/current, will proceed with Du for space computation calculation, 2024-12-09T17:21:03,794 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T17:21:03,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfacbb5ac5899aa3e with lease ID 0x37eb0c433596ebdf: Processing first storage report for DS-b480ce71-6cf4-4f94-bfb0-6da93c8b9c35 from datanode DatanodeRegistration(127.0.0.1:38771, datanodeUuid=5956346e-f1fd-43bb-ade0-bbf2c491cc6a, infoPort=45197, infoSecurePort=0, ipcPort=41003, storageInfo=lv=-57;cid=testClusterID;nsid=1990759853;c=1733764861151) 2024-12-09T17:21:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfacbb5ac5899aa3e with lease ID 0x37eb0c433596ebdf: from storage DS-b480ce71-6cf4-4f94-bfb0-6da93c8b9c35 node DatanodeRegistration(127.0.0.1:38771, datanodeUuid=5956346e-f1fd-43bb-ade0-bbf2c491cc6a, infoPort=45197, infoSecurePort=0, ipcPort=41003, storageInfo=lv=-57;cid=testClusterID;nsid=1990759853;c=1733764861151), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T17:21:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfacbb5ac5899aa3e with lease ID 0x37eb0c433596ebdf: Processing first storage report for DS-8dd3df38-886b-4fd8-afb8-87b339fdbe51 from datanode DatanodeRegistration(127.0.0.1:38771, datanodeUuid=5956346e-f1fd-43bb-ade0-bbf2c491cc6a, infoPort=45197, infoSecurePort=0, ipcPort=41003, storageInfo=lv=-57;cid=testClusterID;nsid=1990759853;c=1733764861151) 2024-12-09T17:21:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfacbb5ac5899aa3e with lease ID 0x37eb0c433596ebdf: from storage DS-8dd3df38-886b-4fd8-afb8-87b339fdbe51 node DatanodeRegistration(127.0.0.1:38771, datanodeUuid=5956346e-f1fd-43bb-ade0-bbf2c491cc6a, infoPort=45197, infoSecurePort=0, ipcPort=41003, storageInfo=lv=-57;cid=testClusterID;nsid=1990759853;c=1733764861151), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T17:21:03,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50 2024-12-09T17:21:03,924 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/zookeeper_0, clientPort=54326, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T17:21:03,934 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=54326 2024-12-09T17:21:03,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:03,945 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:04,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741825_1001 (size=7) 2024-12-09T17:21:04,555 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 with version=8 2024-12-09T17:21:04,556 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/hbase-staging 2024-12-09T17:21:04,660 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T17:21:04,888 INFO [Time-limited test {}] client.ConnectionUtils(129): master/80c69eb3c456:0 server-side Connection retries=45 2024-12-09T17:21:04,903 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:04,904 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:04,904 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T17:21:04,904 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:04,904 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-12-09T17:21:05,009 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T17:21:05,057 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T17:21:05,064 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T17:21:05,067 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T17:21:05,088 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 38320 (auto-detected) 2024-12-09T17:21:05,089 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T17:21:05,106 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45541 2024-12-09T17:21:05,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:05,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:05,126 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45541 connecting to ZooKeeper ensemble=127.0.0.1:54326 2024-12-09T17:21:05,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455410x0, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T17:21:05,237 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45541-0x1000bcf74250000 connected 2024-12-09T17:21:05,310 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T17:21:05,313 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T17:21:05,316 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T17:21:05,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45541 2024-12-09T17:21:05,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45541 2024-12-09T17:21:05,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45541 2024-12-09T17:21:05,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45541 2024-12-09T17:21:05,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45541 2024-12-09T17:21:05,327 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4, hbase.cluster.distributed=false 2024-12-09T17:21:05,380 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/80c69eb3c456:0 server-side Connection retries=45 2024-12-09T17:21:05,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:05,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:05,381 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T17:21:05,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T17:21:05,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T17:21:05,383 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T17:21:05,385 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T17:21:05,386 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42927 2024-12-09T17:21:05,387 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T17:21:05,391 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T17:21:05,393 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:05,395 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:05,398 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42927 connecting to ZooKeeper ensemble=127.0.0.1:54326 2024-12-09T17:21:05,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429270x0, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T17:21:05,409 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429270x0, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T17:21:05,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42927-0x1000bcf74250001 connected 2024-12-09T17:21:05,411 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T17:21:05,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T17:21:05,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42927 2024-12-09T17:21:05,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42927 2024-12-09T17:21:05,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42927 2024-12-09T17:21:05,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42927 2024-12-09T17:21:05,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42927 2024-12-09T17:21:05,422 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/80c69eb3c456,45541,1733764864652 2024-12-09T17:21:05,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T17:21:05,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T17:21:05,435 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;80c69eb3c456:45541 2024-12-09T17:21:05,436 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/80c69eb3c456,45541,1733764864652 2024-12-09T17:21:05,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T17:21:05,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T17:21:05,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:05,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:05,459 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on existing 
znode=/hbase/master 2024-12-09T17:21:05,460 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/80c69eb3c456,45541,1733764864652 from backup master directory 2024-12-09T17:21:05,460 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T17:21:05,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T17:21:05,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/80c69eb3c456,45541,1733764864652 2024-12-09T17:21:05,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T17:21:05,468 WARN [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T17:21:05,468 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=80c69eb3c456,45541,1733764864652 2024-12-09T17:21:05,470 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T17:21:05,471 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T17:21:05,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741826_1002 (size=42) 2024-12-09T17:21:05,931 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/hbase.id with ID: e838248c-c998-4cc6-9ba2-0c99d941e22d 2024-12-09T17:21:05,968 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T17:21:06,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:06,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741827_1003 (size=196) 2024-12-09T17:21:06,428 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:21:06,429 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T17:21:06,444 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T17:21:06,447 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T17:21:06,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741828_1004 (size=1189) 2024-12-09T17:21:06,887 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store 2024-12-09T17:21:06,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741829_1005 (size=34) 2024-12-09T17:21:07,306 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-09T17:21:07,306 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:07,307 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T17:21:07,307 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:21:07,307 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:21:07,307 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T17:21:07,308 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:21:07,308 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:21:07,308 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T17:21:07,310 WARN [master/80c69eb3c456:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/.initializing 2024-12-09T17:21:07,310 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/WALs/80c69eb3c456,45541,1733764864652 2024-12-09T17:21:07,316 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T17:21:07,326 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80c69eb3c456%2C45541%2C1733764864652, suffix=, logDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/WALs/80c69eb3c456,45541,1733764864652, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/oldWALs, maxLogs=10 2024-12-09T17:21:07,344 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/WALs/80c69eb3c456,45541,1733764864652/80c69eb3c456%2C45541%2C1733764864652.1733764867330, exclude list is [], retry=0 2024-12-09T17:21:07,358 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38771,DS-b480ce71-6cf4-4f94-bfb0-6da93c8b9c35,DISK] 2024-12-09T17:21:07,361 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-09T17:21:07,390 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/WALs/80c69eb3c456,45541,1733764864652/80c69eb3c456%2C45541%2C1733764864652.1733764867330 2024-12-09T17:21:07,391 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45197:45197)] 2024-12-09T17:21:07,391 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:07,392 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:07,395 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,395 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T17:21:07,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:07,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:07,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T17:21:07,457 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:07,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:07,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T17:21:07,462 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:07,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:07,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T17:21:07,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:07,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:07,472 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,473 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,481 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T17:21:07,484 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T17:21:07,489 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:21:07,490 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75415426, jitterRate=0.12377741932868958}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T17:21:07,494 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T17:21:07,494 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T17:21:07,519 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a09c237, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:07,546 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-09T17:21:07,555 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T17:21:07,556 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T17:21:07,557 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T17:21:07,559 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T17:21:07,563 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-12-09T17:21:07,563 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T17:21:07,585 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T17:21:07,597 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T17:21:07,642 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-09T17:21:07,644 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T17:21:07,645 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T17:21:07,650 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-09T17:21:07,652 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T17:21:07,655 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T17:21:07,666 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-09T17:21:07,667 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T17:21:07,675 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T17:21:07,685 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T17:21:07,691 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T17:21:07,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T17:21:07,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T17:21:07,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,701 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=80c69eb3c456,45541,1733764864652, sessionid=0x1000bcf74250000, setting cluster-up flag (Was=false) 2024-12-09T17:21:07,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,750 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T17:21:07,752 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=80c69eb3c456,45541,1733764864652 2024-12-09T17:21:07,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:07,792 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T17:21:07,793 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=80c69eb3c456,45541,1733764864652 2024-12-09T17:21:07,835 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;80c69eb3c456:42927 2024-12-09T17:21:07,837 INFO 
[RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1008): ClusterId : e838248c-c998-4cc6-9ba2-0c99d941e22d 2024-12-09T17:21:07,839 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T17:21:07,852 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T17:21:07,852 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T17:21:07,859 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T17:21:07,860 DEBUG [RS:0;80c69eb3c456:42927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b38442f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:07,862 DEBUG [RS:0;80c69eb3c456:42927 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53f3c5f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=80c69eb3c456/172.17.0.2:0 2024-12-09T17:21:07,865 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-09T17:21:07,865 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-09T17:21:07,865 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-09T17:21:07,867 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(3073): reportForDuty to master=80c69eb3c456,45541,1733764864652 with isa=80c69eb3c456/172.17.0.2:42927, startcode=1733764865379 2024-12-09T17:21:07,869 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-09T17:21:07,874 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-09T17:21:07,876 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T17:21:07,877 DEBUG [RS:0;80c69eb3c456:42927 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T17:21:07,881 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 80c69eb3c456,45541,1733764864652 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T17:21:07,884 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/80c69eb3c456:0, corePoolSize=5, maxPoolSize=5 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/80c69eb3c456:0, corePoolSize=5, maxPoolSize=5 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/80c69eb3c456:0, corePoolSize=5, maxPoolSize=5 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/80c69eb3c456:0, corePoolSize=5, maxPoolSize=5 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/80c69eb3c456:0, corePoolSize=10, maxPoolSize=10 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/80c69eb3c456:0, corePoolSize=2, maxPoolSize=2 2024-12-09T17:21:07,885 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:07,889 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733764897889 2024-12-09T17:21:07,891 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T17:21:07,892 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T17:21:07,896 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T17:21:07,896 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T17:21:07,896 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-09T17:21:07,897 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T17:21:07,897 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T17:21:07,897 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T17:21:07,903 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:07,903 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T17:21:07,904 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T17:21:07,906 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T17:21:07,907 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T17:21:07,907 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T17:21:07,910 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T17:21:07,911 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38249, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T17:21:07,911 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T17:21:07,915 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.large.0-1733764867912,5,FailOnTimeoutGroup] 2024-12-09T17:21:07,916 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.small.0-1733764867915,5,FailOnTimeoutGroup] 2024-12-09T17:21:07,916 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:07,916 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T17:21:07,916 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45541 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:07,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741831_1007 (size=1039) 2024-12-09T17:21:07,917 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-09T17:21:07,918 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:07,922 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-09T17:21:07,922 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:21:07,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741832_1008 (size=32) 2024-12-09T17:21:07,940 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-09T17:21:07,940 WARN [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-09T17:21:08,041 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(3073): reportForDuty to master=80c69eb3c456,45541,1733764864652 with isa=80c69eb3c456/172.17.0.2:42927, startcode=1733764865379 2024-12-09T17:21:08,043 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45541 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45541 {}] master.ServerManager(486): Registering regionserver=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,051 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:21:08,052 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42193 2024-12-09T17:21:08,052 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-09T17:21:08,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T17:21:08,059 DEBUG [RS:0;80c69eb3c456:42927 {}] zookeeper.ZKUtil(111): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,059 WARN [RS:0;80c69eb3c456:42927 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T17:21:08,059 INFO [RS:0;80c69eb3c456:42927 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T17:21:08,059 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,062 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [80c69eb3c456,42927,1733764865379] 2024-12-09T17:21:08,072 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-09T17:21:08,081 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T17:21:08,093 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T17:21:08,096 INFO [RS:0;80c69eb3c456:42927 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T17:21:08,096 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T17:21:08,096 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-09T17:21:08,102 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,103 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/80c69eb3c456:0, corePoolSize=2, maxPoolSize=2 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/80c69eb3c456:0, corePoolSize=1, maxPoolSize=1 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/80c69eb3c456:0, corePoolSize=3, maxPoolSize=3 2024-12-09T17:21:08,104 DEBUG [RS:0;80c69eb3c456:42927 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0, corePoolSize=3, maxPoolSize=3 2024-12-09T17:21:08,105 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:08,105 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:08,106 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T17:21:08,106 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:08,106 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,42927,1733764865379-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T17:21:08,124 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T17:21:08,126 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,42927,1733764865379-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:08,143 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.Replication(204): 80c69eb3c456,42927,1733764865379 started 2024-12-09T17:21:08,143 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1767): Serving as 80c69eb3c456,42927,1733764865379, RpcServer on 80c69eb3c456/172.17.0.2:42927, sessionid=0x1000bcf74250001 2024-12-09T17:21:08,144 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T17:21:08,144 DEBUG [RS:0;80c69eb3c456:42927 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,144 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80c69eb3c456,42927,1733764865379' 2024-12-09T17:21:08,144 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T17:21:08,145 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T17:21:08,146 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T17:21:08,146 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T17:21:08,146 DEBUG [RS:0;80c69eb3c456:42927 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,146 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80c69eb3c456,42927,1733764865379' 2024-12-09T17:21:08,146 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T17:21:08,147 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T17:21:08,147 DEBUG [RS:0;80c69eb3c456:42927 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T17:21:08,147 INFO [RS:0;80c69eb3c456:42927 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T17:21:08,147 INFO [RS:0;80c69eb3c456:42927 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T17:21:08,251 INFO [RS:0;80c69eb3c456:42927 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T17:21:08,255 INFO [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80c69eb3c456%2C42927%2C1733764865379, suffix=, logDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/oldWALs, maxLogs=32 2024-12-09T17:21:08,268 DEBUG [RS:0;80c69eb3c456:42927 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379/80c69eb3c456%2C42927%2C1733764865379.1733764868257, exclude list is [], retry=0 2024-12-09T17:21:08,272 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38771,DS-b480ce71-6cf4-4f94-bfb0-6da93c8b9c35,DISK] 2024-12-09T17:21:08,275 INFO [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379/80c69eb3c456%2C42927%2C1733764865379.1733764868257 2024-12-09T17:21:08,275 DEBUG [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45197:45197)] 2024-12-09T17:21:08,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:08,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T17:21:08,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T17:21:08,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T17:21:08,343 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T17:21:08,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T17:21:08,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T17:21:08,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,349 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740 2024-12-09T17:21:08,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740 2024-12-09T17:21:08,352 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:21:08,354 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T17:21:08,358 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:21:08,359 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64265384, jitterRate=-0.042371153831481934}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:21:08,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T17:21:08,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T17:21:08,361 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T17:21:08,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T17:21:08,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T17:21:08,361 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T17:21:08,362 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T17:21:08,362 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T17:21:08,365 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-09T17:21:08,365 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-09T17:21:08,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T17:21:08,376 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T17:21:08,378 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T17:21:08,529 DEBUG [80c69eb3c456:45541 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T17:21:08,533 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,537 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 80c69eb3c456,42927,1733764865379, state=OPENING 2024-12-09T17:21:08,550 DEBUG [PEWorker-5 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T17:21:08,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:08,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:08,559 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T17:21:08,559 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T17:21:08,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:08,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,738 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T17:21:08,742 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T17:21:08,756 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-09T17:21:08,756 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T17:21:08,757 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T17:21:08,761 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80c69eb3c456%2C42927%2C1733764865379.meta, suffix=.meta, logDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379, archiveDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/oldWALs, maxLogs=32 2024-12-09T17:21:08,777 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379/80c69eb3c456%2C42927%2C1733764865379.meta.1733764868762.meta, exclude list is [], retry=0 2024-12-09T17:21:08,781 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38771,DS-b480ce71-6cf4-4f94-bfb0-6da93c8b9c35,DISK] 2024-12-09T17:21:08,785 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/WALs/80c69eb3c456,42927,1733764865379/80c69eb3c456%2C42927%2C1733764865379.meta.1733764868762.meta 2024-12-09T17:21:08,785 DEBUG 
[RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45197:45197)] 2024-12-09T17:21:08,786 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:08,787 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T17:21:08,846 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T17:21:08,851 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T17:21:08,856 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T17:21:08,856 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:08,856 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-09T17:21:08,856 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-09T17:21:08,860 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T17:21:08,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T17:21:08,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T17:21:08,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T17:21:08,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T17:21:08,871 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T17:21:08,871 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:08,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T17:21:08,876 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740 2024-12-09T17:21:08,881 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740 2024-12-09T17:21:08,885 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:21:08,888 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-09T17:21:08,890 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63185938, jitterRate=-0.05845615267753601}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:21:08,892 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-09T17:21:08,898 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733764868730 2024-12-09T17:21:08,910 DEBUG [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T17:21:08,911 INFO [RS_OPEN_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-09T17:21:08,912 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:08,914 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 80c69eb3c456,42927,1733764865379, state=OPEN 2024-12-09T17:21:09,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T17:21:09,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T17:21:09,055 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T17:21:09,055 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T17:21:09,060 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T17:21:09,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=80c69eb3c456,42927,1733764865379 in 494 msec 2024-12-09T17:21:09,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T17:21:09,067 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 692 msec 2024-12-09T17:21:09,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2450 sec 2024-12-09T17:21:09,072 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733764869072, completionTime=-1 2024-12-09T17:21:09,072 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T17:21:09,073 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-09T17:21:09,109 DEBUG [hconnection-0x7e149453-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:09,111 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:09,125 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-09T17:21:09,126 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733764929125 2024-12-09T17:21:09,126 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733764989126 2024-12-09T17:21:09,126 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 53 msec 2024-12-09T17:21:09,160 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:09,160 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:09,161 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:09,162 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-80c69eb3c456:45541, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:09,162 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T17:21:09,168 DEBUG [master/80c69eb3c456:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-09T17:21:09,170 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-09T17:21:09,172 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T17:21:09,178 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-09T17:21:09,181 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:21:09,182 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:09,184 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:21:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741835_1011 (size=358) 2024-12-09T17:21:09,600 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ad9a19a7365c7aeecc9593a7078cfd44, NAME => 'hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:21:09,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741836_1012 (size=42) 2024-12-09T17:21:09,611 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:09,611 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ad9a19a7365c7aeecc9593a7078cfd44, disabling compactions & flushes 2024-12-09T17:21:09,612 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:09,612 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:09,612 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 
after waiting 0 ms 2024-12-09T17:21:09,612 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:09,612 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:09,612 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ad9a19a7365c7aeecc9593a7078cfd44: 2024-12-09T17:21:09,614 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:21:09,621 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733764869616"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764869616"}]},"ts":"1733764869616"} 2024-12-09T17:21:09,644 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:21:09,646 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:21:09,667 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764869646"}]},"ts":"1733764869646"} 2024-12-09T17:21:09,671 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-09T17:21:09,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ad9a19a7365c7aeecc9593a7078cfd44, ASSIGN}] 2024-12-09T17:21:09,704 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ad9a19a7365c7aeecc9593a7078cfd44, ASSIGN 2024-12-09T17:21:09,706 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ad9a19a7365c7aeecc9593a7078cfd44, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:21:09,856 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ad9a19a7365c7aeecc9593a7078cfd44, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:09,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ad9a19a7365c7aeecc9593a7078cfd44, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:10,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:10,023 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:10,023 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ad9a19a7365c7aeecc9593a7078cfd44, NAME => 'hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:10,024 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,024 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:10,024 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,025 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,028 INFO [StoreOpener-ad9a19a7365c7aeecc9593a7078cfd44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,031 INFO [StoreOpener-ad9a19a7365c7aeecc9593a7078cfd44-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad9a19a7365c7aeecc9593a7078cfd44 columnFamilyName info 2024-12-09T17:21:10,031 DEBUG [StoreOpener-ad9a19a7365c7aeecc9593a7078cfd44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:10,032 INFO [StoreOpener-ad9a19a7365c7aeecc9593a7078cfd44-1 {}] regionserver.HStore(327): Store=ad9a19a7365c7aeecc9593a7078cfd44/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:10,033 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,034 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,038 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:21:10,042 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:21:10,043 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ad9a19a7365c7aeecc9593a7078cfd44; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71186149, jitterRate=0.06075628101825714}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T17:21:10,044 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ad9a19a7365c7aeecc9593a7078cfd44: 2024-12-09T17:21:10,046 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44., pid=6, masterSystemTime=1733764870016 2024-12-09T17:21:10,049 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:21:10,049 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 
2024-12-09T17:21:10,050 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ad9a19a7365c7aeecc9593a7078cfd44, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:10,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T17:21:10,060 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ad9a19a7365c7aeecc9593a7078cfd44, server=80c69eb3c456,42927,1733764865379 in 192 msec 2024-12-09T17:21:10,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T17:21:10,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ad9a19a7365c7aeecc9593a7078cfd44, ASSIGN in 357 msec 2024-12-09T17:21:10,064 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:21:10,065 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764870064"}]},"ts":"1733764870064"} 2024-12-09T17:21:10,067 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-09T17:21:10,110 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-09T17:21:10,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:21:10,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 939 msec 2024-12-09T17:21:10,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:10,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-09T17:21:10,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:21:10,223 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-09T17:21:10,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T17:21:10,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 43 msec 2024-12-09T17:21:10,267 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-09T17:21:10,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-09T17:21:10,304 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 36 msec 2024-12-09T17:21:10,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-09T17:21:10,341 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-09T17:21:10,342 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.874sec 2024-12-09T17:21:10,343 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T17:21:10,345 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T17:21:10,346 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T17:21:10,347 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T17:21:10,347 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T17:21:10,348 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T17:21:10,348 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T17:21:10,355 DEBUG [master/80c69eb3c456:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-09T17:21:10,356 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T17:21:10,356 INFO [master/80c69eb3c456:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80c69eb3c456,45541,1733764864652-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
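The two CreateNamespaceProcedure entries above (pid=7 for 'default', pid=8 for 'hbase') show the master bootstrapping its built-in namespaces while becoming active. For orientation only, the same kind of request can be issued through the HBase 2.x client Admin API; the sketch below is not part of this test, and the namespace name and default-configuration connection are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath; assumes a reachable cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Drives the same master-side CreateNamespaceProcedure logged above,
      // here for a hypothetical namespace rather than the built-in ones.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}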
2024-12-09T17:21:10,444 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6169df5c to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a3c3fb3 2024-12-09T17:21:10,444 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-09T17:21:10,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@523a59d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:10,464 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T17:21:10,464 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T17:21:10,474 DEBUG [hconnection-0x46f0e06c-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:10,482 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36738, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:10,491 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=80c69eb3c456,45541,1733764864652 2024-12-09T17:21:10,504 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=185, ProcessCount=11, AvailableMemoryMB=4795 2024-12-09T17:21:10,514 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:21:10,517 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:21:10,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
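The WARN at 17:21:10,444 notes that the test client still uses the deprecated ZKConnectionRegistry and points at the RPC-based registry described in the HBase book. A client following that advice would set the registry implementation and a bootstrap endpoint before opening the connection; the property names below are recalled from the reference guide and the endpoint is a placeholder, so verify them against the URL in the warning before relying on this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcRegistrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Swap the deprecated ZK-based registry for the RPC-based one (property name assumed from the book).
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    // Bootstrap node(s) the RPC registry should contact; "master-host:16000" is illustrative.
    conf.set("hbase.client.bootstrap.servers", "master-host:16000");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connection open: " + !conn.isClosed());
    }
  }
}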
2024-12-09T17:21:10,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:21:10,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:10,533 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:21:10,533 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:10,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-09T17:21:10,535 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:21:10,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:10,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741837_1013 (size=963) 2024-12-09T17:21:10,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:10,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:10,954 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:21:10,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741838_1014 (size=53) 2024-12-09T17:21:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 326764652e67b313fc217edc01a9dfcb, disabling compactions & flushes 2024-12-09T17:21:11,367 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. after waiting 0 ms 2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:11,367 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
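The create request logged at 17:21:10,528 describes 'TestAcidGuarantees' with three column families A, B and C, one version per cell, 64 KB blocks, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that later makes each store open a CompactingMemStore with an ADAPTIVE compactor. A minimal Java equivalent of that descriptor, assuming the HBase 2.x client API and a default-configuration connection, might look like the sketch below; only the attributes visible in the log are set, everything else stays at its default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // TABLE_ATTRIBUTES => METADATA in the log: enable adaptive in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)       // VERSIONS => '1'
                .setBlocksize(64 * 1024) // BLOCKSIZE => '65536 B (64KB)'
                .build());
      }
      admin.createTable(table.build());
    }
  }
}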
2024-12-09T17:21:11,367 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:11,369 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:21:11,369 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733764871369"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764871369"}]},"ts":"1733764871369"} 2024-12-09T17:21:11,372 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:21:11,374 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:21:11,374 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764871374"}]},"ts":"1733764871374"} 2024-12-09T17:21:11,376 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:21:11,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, ASSIGN}] 2024-12-09T17:21:11,435 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, ASSIGN 2024-12-09T17:21:11,436 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:21:11,587 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=326764652e67b313fc217edc01a9dfcb, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:11,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:11,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:11,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:11,748 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:11,748 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:11,748 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,748 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:11,748 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,749 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,750 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,753 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:11,754 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 326764652e67b313fc217edc01a9dfcb columnFamilyName A 2024-12-09T17:21:11,754 DEBUG [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:11,755 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(327): Store=326764652e67b313fc217edc01a9dfcb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:11,755 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,757 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:11,757 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 326764652e67b313fc217edc01a9dfcb columnFamilyName B 2024-12-09T17:21:11,757 DEBUG [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:11,758 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(327): Store=326764652e67b313fc217edc01a9dfcb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:11,758 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,760 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:11,760 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 326764652e67b313fc217edc01a9dfcb columnFamilyName C 2024-12-09T17:21:11,760 DEBUG [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:11,761 INFO [StoreOpener-326764652e67b313fc217edc01a9dfcb-1 {}] regionserver.HStore(327): Store=326764652e67b313fc217edc01a9dfcb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:11,761 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:11,763 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,764 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,766 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:21:11,768 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:11,772 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:21:11,773 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 326764652e67b313fc217edc01a9dfcb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70123557, jitterRate=0.04492242634296417}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:21:11,774 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:11,776 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., pid=11, masterSystemTime=1733764871742 2024-12-09T17:21:11,779 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:11,779 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
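The StoreOpener entries above show each of the A, B and C stores coming up with memstore type=CompactingMemStore and an ADAPTIVE compactor, driven by the table-level attribute set at creation time. In-memory compaction can also be requested per column family; the sketch below shows that variant as an assumed equivalent (hypothetical table name, HBase 2.x client API), not something taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class PerFamilyAdaptiveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table; the ADAPTIVE policy is attached to the family instead of the table metadata.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleAdaptive"))
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                      .build())
              .build());
    }
  }
}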
2024-12-09T17:21:11,779 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=326764652e67b313fc217edc01a9dfcb, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:11,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T17:21:11,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 in 192 msec 2024-12-09T17:21:11,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T17:21:11,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, ASSIGN in 352 msec 2024-12-09T17:21:11,789 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:21:11,789 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764871789"}]},"ts":"1733764871789"} 2024-12-09T17:21:11,792 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:21:11,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:21:11,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3070 sec 2024-12-09T17:21:12,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-09T17:21:12,653 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-09T17:21:12,657 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x038196d7 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e59596a 2024-12-09T17:21:12,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30640414, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,703 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,705 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,708 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:21:12,710 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54878, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:21:12,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x28808bb9 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cac4303 2024-12-09T17:21:12,726 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@536a4a58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x774bf929 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b10898 2024-12-09T17:21:12,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18751c86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54af89df to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d7115de 2024-12-09T17:21:12,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dd0bbda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x251efa5e to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30d4d4c6 2024-12-09T17:21:12,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c57419f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,752 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67f7d3d3 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c943d 2024-12-09T17:21:12,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@435176b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,761 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4bf8e82a to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f0c7188 2024-12-09T17:21:12,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e957ecd, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,769 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3ba01639 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@475ca0f4 2024-12-09T17:21:12,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22daddc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,777 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24890c79 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50c9c1d1 2024-12-09T17:21:12,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39028e20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,785 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51cab508 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4f1331a9 2024-12-09T17:21:12,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624dc5e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:12,800 DEBUG [hconnection-0xabff582-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,800 DEBUG [hconnection-0x2e14f92e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,801 DEBUG [hconnection-0x1edc7dcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,801 DEBUG [hconnection-0x38bda5c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,802 DEBUG [hconnection-0x1b171e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,803 DEBUG [hconnection-0x53bc7585-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,804 DEBUG [hconnection-0x76910286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:12,805 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
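The HMaster$22(4386) entry above records an administrative flush of TestAcidGuarantees, which the master turns into FlushTableProcedure pid=12 with a FlushRegionProcedure subtask. From the client side that whole chain is a single Admin call; the sketch below assumes a default-configuration connection and is not code from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all regions of the table (the FlushTableProcedure seen above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}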
2024-12-09T17:21:12,806 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,808 DEBUG [hconnection-0x13693b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,809 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,809 DEBUG [hconnection-0x588b494f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-09T17:21:12,812 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:12,814 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,815 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T17:21:12,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:12,834 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,837 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,841 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,864 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,882 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:12,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:21:12,901 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:12,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:12,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:12,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:12,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:12,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T17:21:13,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-09T17:21:13,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:13,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:13,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764933034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764933034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/36d940e2a3a34dd7b13dea64b335fb0e is 50, key is test_row_0/A:col10/1733764872863/Put/seqid=0 2024-12-09T17:21:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764933035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764933043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764933044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741839_1015 (size=16681) 2024-12-09T17:21:13,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T17:21:13,123 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/36d940e2a3a34dd7b13dea64b335fb0e 2024-12-09T17:21:13,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764933165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764933166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764933169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764933167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764933173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,196 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-09T17:21:13,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:13,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/debc8c9ade334d319d34438bcbfaaf4a is 50, key is test_row_0/B:col10/1733764872863/Put/seqid=0 2024-12-09T17:21:13,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741840_1016 (size=12001) 2024-12-09T17:21:13,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/debc8c9ade334d319d34438bcbfaaf4a 2024-12-09T17:21:13,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4f776badca464780873fb439252af78f is 50, key is test_row_0/C:col10/1733764872863/Put/seqid=0 2024-12-09T17:21:13,364 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-09T17:21:13,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:13,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:13,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:13,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:13,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764933373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764933373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764933374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764933381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741841_1017 (size=12001) 2024-12-09T17:21:13,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764933381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4f776badca464780873fb439252af78f 2024-12-09T17:21:13,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/36d940e2a3a34dd7b13dea64b335fb0e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e 2024-12-09T17:21:13,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e, entries=250, sequenceid=13, filesize=16.3 K 2024-12-09T17:21:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T17:21:13,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/debc8c9ade334d319d34438bcbfaaf4a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a 2024-12-09T17:21:13,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a, entries=150, sequenceid=13, filesize=11.7 K 2024-12-09T17:21:13,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4f776badca464780873fb439252af78f as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f 2024-12-09T17:21:13,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f, entries=150, sequenceid=13, filesize=11.7 K 2024-12-09T17:21:13,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 326764652e67b313fc217edc01a9dfcb in 581ms, sequenceid=13, compaction requested=false 2024-12-09T17:21:13,475 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-09T17:21:13,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:13,526 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-09T17:21:13,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:13,527 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8fc7f4549fde40c298aa4a10d05e6203 is 50, key is test_row_0/A:col10/1733764873022/Put/seqid=0 2024-12-09T17:21:13,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741842_1018 (size=12001) 2024-12-09T17:21:13,569 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8fc7f4549fde40c298aa4a10d05e6203 2024-12-09T17:21:13,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c661f75b2cd64b4aa7aa1ac6fb28324f is 50, key is test_row_0/B:col10/1733764873022/Put/seqid=0 2024-12-09T17:21:13,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741843_1019 (size=12001) 2024-12-09T17:21:13,658 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c661f75b2cd64b4aa7aa1ac6fb28324f 2024-12-09T17:21:13,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/06d73677d2c74626ba348a1ac68a762c is 50, key is test_row_0/C:col10/1733764873022/Put/seqid=0 2024-12-09T17:21:13,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:13,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:13,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764933708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764933708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764933709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764933711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764933714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741844_1020 (size=12001) 2024-12-09T17:21:13,747 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/06d73677d2c74626ba348a1ac68a762c 2024-12-09T17:21:13,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8fc7f4549fde40c298aa4a10d05e6203 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203 2024-12-09T17:21:13,776 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203, entries=150, sequenceid=38, filesize=11.7 K 2024-12-09T17:21:13,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c661f75b2cd64b4aa7aa1ac6fb28324f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f 2024-12-09T17:21:13,792 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f, entries=150, sequenceid=38, filesize=11.7 K 2024-12-09T17:21:13,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/06d73677d2c74626ba348a1ac68a762c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c 2024-12-09T17:21:13,820 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c, entries=150, sequenceid=38, filesize=11.7 K 2024-12-09T17:21:13,823 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 326764652e67b313fc217edc01a9dfcb in 295ms, sequenceid=38, compaction requested=false 2024-12-09T17:21:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-09T17:21:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-09T17:21:13,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:13,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:21:13,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:13,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:13,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-09T17:21:13,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:13,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-12-09T17:21:13,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:13,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.0290 sec 2024-12-09T17:21:13,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/28af4dcccb54490d828508ac38a991d6 is 50, key is test_row_0/A:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:13,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741845_1021 (size=12001) 2024-12-09T17:21:13,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/28af4dcccb54490d828508ac38a991d6 2024-12-09T17:21:13,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764933898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764933903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764933903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764933909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764933912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:13,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/aa12b89bbd6644f18357bfa8f89f4dc1 is 50, key is test_row_0/B:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:13,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-09T17:21:13,927 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-09T17:21:13,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:13,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-09T17:21:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:13,935 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:13,937 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:13,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:13,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741846_1022 (size=12001) 2024-12-09T17:21:14,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764934014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764934014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764934015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764934015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764934020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:14,090 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:14,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764934222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764934222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764934223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764934225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:14,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764934241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:14,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/aa12b89bbd6644f18357bfa8f89f4dc1 2024-12-09T17:21:14,370 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:21:14,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/60c86e9ed0fb4c62a0d1aee538f742de is 50, key is test_row_0/C:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:14,401 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,403 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:14,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741847_1023 (size=12001) 2024-12-09T17:21:14,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/60c86e9ed0fb4c62a0d1aee538f742de 2024-12-09T17:21:14,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/28af4dcccb54490d828508ac38a991d6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6 2024-12-09T17:21:14,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6, entries=150, sequenceid=52, filesize=11.7 K 2024-12-09T17:21:14,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/aa12b89bbd6644f18357bfa8f89f4dc1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1 2024-12-09T17:21:14,448 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T17:21:14,452 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-09T17:21:14,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1, entries=150, sequenceid=52, filesize=11.7 K 2024-12-09T17:21:14,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/60c86e9ed0fb4c62a0d1aee538f742de as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de 2024-12-09T17:21:14,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de, entries=150, sequenceid=52, filesize=11.7 K 2024-12-09T17:21:14,483 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 326764652e67b313fc217edc01a9dfcb in 651ms, sequenceid=52, compaction requested=true 2024-12-09T17:21:14,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:14,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:14,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:14,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:14,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:14,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:14,501 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:14,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:14,505 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:14,505 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:14,507 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:14,507 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:14,507 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:14,507 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=35.2 K 2024-12-09T17:21:14,507 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:14,508 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,508 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=39.7 K 2024-12-09T17:21:14,510 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36d940e2a3a34dd7b13dea64b335fb0e, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764872832 2024-12-09T17:21:14,509 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting debc8c9ade334d319d34438bcbfaaf4a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764872863 2024-12-09T17:21:14,511 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fc7f4549fde40c298aa4a10d05e6203, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733764873022 2024-12-09T17:21:14,511 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c661f75b2cd64b4aa7aa1ac6fb28324f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733764873022 2024-12-09T17:21:14,511 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 28af4dcccb54490d828508ac38a991d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:14,512 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting aa12b89bbd6644f18357bfa8f89f4dc1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:14,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:14,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:14,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-09T17:21:14,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:14,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:14,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:14,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,569 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#10 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:14,570 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/e96bf0fa5736429f992a9a1be7685c19 is 50, key is test_row_0/A:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:14,577 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#9 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:14,578 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/d4c3169fc0064982aaf0cac49ad58369 is 50, key is test_row_0/B:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:14,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/c5c4dba065f547cc83000823df56b469 is 50, key is test_row_0/A:col10/1733764873898/Put/seqid=0 2024-12-09T17:21:14,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764934570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764934573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764934573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764934582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764934585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741848_1024 (size=12104) 2024-12-09T17:21:14,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741849_1025 (size=12104) 2024-12-09T17:21:14,666 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/e96bf0fa5736429f992a9a1be7685c19 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e96bf0fa5736429f992a9a1be7685c19 2024-12-09T17:21:14,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741850_1026 (size=12001) 2024-12-09T17:21:14,687 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into e96bf0fa5736429f992a9a1be7685c19(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:14,687 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:14,688 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764874485; duration=0sec 2024-12-09T17:21:14,688 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:14,688 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:14,689 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:14,697 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:14,697 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:14,697 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,697 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=35.2 K 2024-12-09T17:21:14,699 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f776badca464780873fb439252af78f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764872863 2024-12-09T17:21:14,701 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06d73677d2c74626ba348a1ac68a762c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733764873022 2024-12-09T17:21:14,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764934698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,704 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60c86e9ed0fb4c62a0d1aee538f742de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:14,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764934699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764934700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:14,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:14,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764934713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764934712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,742 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#12 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:14,743 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/ba8cb8f160224447a4631c6642b87384 is 50, key is test_row_0/C:col10/1733764873709/Put/seqid=0 2024-12-09T17:21:14,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741851_1027 (size=12104) 2024-12-09T17:21:14,773 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/ba8cb8f160224447a4631c6642b87384 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ba8cb8f160224447a4631c6642b87384 2024-12-09T17:21:14,788 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into ba8cb8f160224447a4631c6642b87384(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:14,788 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:14,788 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764874500; duration=0sec 2024-12-09T17:21:14,788 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:14,789 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:14,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:14,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:14,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:14,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
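Editor's note: the repeated "Region is too busy" warnings above are thrown back to the writer threads while flush and compaction drain the memstore. A caller driving Table.put() directly could back off and retry roughly as sketched below; this is illustrative only (the HBase client also retries such exceptions internally), and the retry counts and sleep values are made up for the sketch.

    // Illustrative retry-with-backoff around a blocked put; not the test's code.
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    final class BusyRegionRetrySketch {
      static void putWithBackoff(Table table, Put put) throws Exception {
        long backoffMs = 100;
        for (int attempt = 0; attempt < 10; attempt++) {
          try {
            table.put(put);            // blocked writes surface as RegionTooBusyException
            return;
          } catch (RegionTooBusyException busy) {
            Thread.sleep(backoffMs);   // give the flush/compaction time to drain the memstore
            backoffMs = Math.min(backoffMs * 2, 5_000);
          }
        }
        throw new Exception("region still busy after retries");
      }
    }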
2024-12-09T17:21:14,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:14,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764934908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764934911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764934913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764934932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:14,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764934932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,027 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:15,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:15,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:15,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:15,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:15,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:15,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:15,053 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/d4c3169fc0064982aaf0cac49ad58369 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/d4c3169fc0064982aaf0cac49ad58369 2024-12-09T17:21:15,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T17:21:15,056 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T17:21:15,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-09T17:21:15,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-09T17:21:15,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T17:21:15,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics 
about HBase RegionObservers 2024-12-09T17:21:15,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T17:21:15,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T17:21:15,064 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-09T17:21:15,064 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-09T17:21:15,075 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into d4c3169fc0064982aaf0cac49ad58369(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:15,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:15,076 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764874500; duration=0sec 2024-12-09T17:21:15,076 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:15,076 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:15,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/c5c4dba065f547cc83000823df56b469 2024-12-09T17:21:15,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e26b76c812f349d2a9ada11001d0d4d4 is 50, key is test_row_0/B:col10/1733764873898/Put/seqid=0 2024-12-09T17:21:15,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741852_1028 (size=12001) 2024-12-09T17:21:15,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e26b76c812f349d2a9ada11001d0d4d4 2024-12-09T17:21:15,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/253e2c9adf4843c6911d2695264740ef is 50, key is test_row_0/C:col10/1733764873898/Put/seqid=0 2024-12-09T17:21:15,188 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:15,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:15,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:15,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:15,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:15,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:15,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741853_1029 (size=12001) 2024-12-09T17:21:15,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
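Editor's note: the "Over memstore limit=512.0 K" figure in the warnings above is the per-region blocking size, which HBase computes as the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; once the region's memstore exceeds it, HRegion.checkResources() rejects mutations with RegionTooBusyException, exactly as the stack traces show. The concrete values below are assumptions chosen to reproduce 512 K (128 K x 4), not values read from this test's configuration.

    // Sketch of a configuration that yields a 512 K blocking memstore limit (assumed values).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class MemstoreLimitSketch {
      static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K flush size (assumption)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
        // blocking size = 128 K * 4 = 512 K; above that, puts fail fast with
        // RegionTooBusyException instead of queuing behind the flush.
        return conf;
      }
    }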
2024-12-09T17:21:15,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/253e2c9adf4843c6911d2695264740ef 2024-12-09T17:21:15,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/c5c4dba065f547cc83000823df56b469 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469 2024-12-09T17:21:15,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764935212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764935219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764935221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469, entries=150, sequenceid=79, filesize=11.7 K 2024-12-09T17:21:15,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e26b76c812f349d2a9ada11001d0d4d4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4 2024-12-09T17:21:15,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764935239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764935242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4, entries=150, sequenceid=79, filesize=11.7 K 2024-12-09T17:21:15,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/253e2c9adf4843c6911d2695264740ef as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef 2024-12-09T17:21:15,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef, entries=150, sequenceid=79, filesize=11.7 K 2024-12-09T17:21:15,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 326764652e67b313fc217edc01a9dfcb in 714ms, sequenceid=79, compaction requested=false 2024-12-09T17:21:15,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:15,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-09T17:21:15,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
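Editor's note: once the flush above completes and the A/B/C store files are committed, the property this test is exercising is that readers never observe a row whose column families disagree, because each writer updates all three families in one atomic Put. The sketch below is a rough illustration of that invariant check as I understand the test, not the actual TestAcidGuarantees code; the row name follows the log, the rest is assumed.

    // Rough check that all cells returned for a row carry the same value across A, B and C.
    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class AtomicityCheckSketch {
      static void verifyRow(Table table, String row) throws Exception {
        Result result = table.get(new Get(Bytes.toBytes(row)));   // e.g. "test_row_0" from the log
        byte[] expected = null;
        for (Cell cell : result.rawCells()) {
          byte[] value = CellUtil.cloneValue(cell);
          if (expected == null) {
            expected = value;                                     // first cell sets the expectation
          } else if (!Arrays.equals(expected, value)) {
            throw new AssertionError("row " + row + " saw mixed values across families");
          }
        }
      }
    }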
2024-12-09T17:21:15,347 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-09T17:21:15,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:15,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/e510462aa0104cf09b6100ef85c91341 is 50, key is test_row_0/A:col10/1733764874581/Put/seqid=0 2024-12-09T17:21:15,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741854_1030 (size=9657) 2024-12-09T17:21:15,393 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/e510462aa0104cf09b6100ef85c91341 2024-12-09T17:21:15,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/4f7224c5ba914322aba0261243d5c966 is 50, key is test_row_0/B:col10/1733764874581/Put/seqid=0 2024-12-09T17:21:15,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741855_1031 (size=9657) 2024-12-09T17:21:15,459 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/4f7224c5ba914322aba0261243d5c966 2024-12-09T17:21:15,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/be02eed04a68446f9526ad5401f152cc is 50, key is test_row_0/C:col10/1733764874581/Put/seqid=0 2024-12-09T17:21:15,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741856_1032 (size=9657) 2024-12-09T17:21:15,516 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/be02eed04a68446f9526ad5401f152cc 2024-12-09T17:21:15,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/e510462aa0104cf09b6100ef85c91341 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341 2024-12-09T17:21:15,548 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341, entries=100, sequenceid=91, filesize=9.4 K 2024-12-09T17:21:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/4f7224c5ba914322aba0261243d5c966 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966 2024-12-09T17:21:15,567 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966, entries=100, sequenceid=91, filesize=9.4 K 2024-12-09T17:21:15,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/be02eed04a68446f9526ad5401f152cc as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc 2024-12-09T17:21:15,585 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc, entries=100, sequenceid=91, filesize=9.4 K 2024-12-09T17:21:15,587 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 326764652e67b313fc217edc01a9dfcb in 240ms, sequenceid=91, compaction requested=true 2024-12-09T17:21:15,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:15,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:15,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-09T17:21:15,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-09T17:21:15,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-09T17:21:15,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6520 sec 2024-12-09T17:21:15,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.6640 sec 2024-12-09T17:21:15,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:15,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:15,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:15,769 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 is 50, key is test_row_0/A:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:15,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741857_1033 (size=12001) 2024-12-09T17:21:15,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 2024-12-09T17:21:15,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764935820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764935823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764935825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764935829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764935832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ef7b2492e2dc49b6aa52328c5b7981bd is 50, key is test_row_0/B:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:15,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741858_1034 (size=12001) 2024-12-09T17:21:15,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ef7b2492e2dc49b6aa52328c5b7981bd 2024-12-09T17:21:15,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/7c757cae816a4d808c76438df8888863 is 50, key is test_row_0/C:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:15,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764935935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764935935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764935936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764935937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:15,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764935937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:15,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741859_1035 (size=12001) 2024-12-09T17:21:16,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-09T17:21:16,042 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-09T17:21:16,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:16,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-09T17:21:16,049 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:16,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:16,050 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:16,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:16,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764936142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764936144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764936144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764936145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764936145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:16,205 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,206 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
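[editor's note] The run of RegionTooBusyException warnings above is back-pressure, not data loss: the write path rejects each Mutate call while the region's in-memory store is over the 512 K blocking limit used in this test, and the same client connections reappear with new callIds a few hundred milliseconds later once the in-flight flush frees memory. The standalone Java sketch below only illustrates that reject-while-over-limit pattern; the class, method and field names are invented for the example and are not the HBase implementation.

    import java.util.concurrent.atomic.AtomicLong;

    public class MemstorePressureExample {

        /** Thrown when a write arrives while the in-memory store is over its blocking limit. */
        static class TooBusyException extends RuntimeException {
            TooBusyException(String msg) { super(msg); }
        }

        private static final long BLOCKING_LIMIT_BYTES = 512 * 1024; // "512.0 K", as in the log above
        private final AtomicLong memstoreSizeBytes = new AtomicLong();

        /** Reject the write instead of queueing it when the region is over the limit. */
        void checkResources(String regionName) {
            long size = memstoreSizeBytes.get();
            if (size > BLOCKING_LIMIT_BYTES) {
                throw new TooBusyException("Over memstore limit=" + (BLOCKING_LIMIT_BYTES / 1024.0)
                    + " K, regionName=" + regionName + ", currentSize=" + size);
            }
        }

        /** A put first passes the resource check, then accounts for the bytes it added. */
        void put(String regionName, int cellSizeBytes) {
            checkResources(regionName);
            memstoreSizeBytes.addAndGet(cellSizeBytes);
        }

        /** A flush releases the in-memory accounting once data is safely on disk. */
        void flushed(long flushedBytes) {
            memstoreSizeBytes.addAndGet(-flushedBytes);
        }

        public static void main(String[] args) {
            MemstorePressureExample region = new MemstorePressureExample();
            region.memstoreSizeBytes.set(600 * 1024);              // simulate a backed-up memstore
            try {
                region.put("326764652e67b313fc217edc01a9dfcb", 100);
            } catch (TooBusyException e) {
                System.out.println("rejected: " + e.getMessage()); // client backs off and retries
            }
        }
    }

Rejecting the write outright (rather than parking the RPC handler) keeps the handler threads free while the flush catches up, which matches how quickly the same handlers log the next rejection here.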
2024-12-09T17:21:16,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:16,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/7c757cae816a4d808c76438df8888863 2024-12-09T17:21:16,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
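[editor's note] The pid=17 failures directly above are the expected collision between the table-level flush procedure and the flush that MemStoreFlusher.0 already has in flight: the remote callable sees the region is "already flushing", declines to start a second flush, reports an IOException back to the master, and the procedure is dispatched to the region server again a moment later. A minimal sketch of that decline-and-retry shape, with all names invented for illustration (this is not the HBase procedure framework):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class FlushRetryExample {

        /** Stand-in for a region that can only run one flush at a time. */
        static class Region {
            final AtomicBoolean flushing = new AtomicBoolean(false);

            /** Returns true if this call performed the flush, false if one was already running. */
            boolean flushIfIdle() {
                if (!flushing.compareAndSet(false, true)) {
                    return false;                 // "NOT flushing ... as already flushing"
                }
                try {
                    // write the memstore contents to new store files here
                    return true;
                } finally {
                    flushing.set(false);
                }
            }
        }

        /** Stand-in for the remote flush callable: it must either flush or fail loudly. */
        static void runFlushProcedure(Region region) throws IOException {
            if (!region.flushIfIdle()) {
                throw new IOException("Unable to complete flush, region is already flushing");
            }
        }

        public static void main(String[] args) throws Exception {
            Region region = new Region();
            region.flushing.set(true);            // simulate the background flusher already at work
            try {
                runFlushProcedure(region);        // first dispatch fails, as with pid=17 above
            } catch (IOException e) {
                System.out.println("report to master: " + e.getMessage());
            }
            region.flushing.set(false);           // background flush finishes
            runFlushProcedure(region);            // a later dispatch succeeds
            System.out.println("flush procedure completed on retry");
        }
    }

The important property is that declining is safe to repeat: the callable can simply be dispatched again once the in-flight flush completes.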
2024-12-09T17:21:16,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 2024-12-09T17:21:16,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643, entries=150, sequenceid=102, filesize=11.7 K 2024-12-09T17:21:16,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ef7b2492e2dc49b6aa52328c5b7981bd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd 2024-12-09T17:21:16,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd, entries=150, sequenceid=102, filesize=11.7 K 2024-12-09T17:21:16,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/7c757cae816a4d808c76438df8888863 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863 2024-12-09T17:21:16,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863, entries=150, sequenceid=102, filesize=11.7 K 2024-12-09T17:21:16,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 326764652e67b313fc217edc01a9dfcb in 668ms, sequenceid=102, compaction requested=true 2024-12-09T17:21:16,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:16,427 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:16,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:16,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:16,429 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:16,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:16,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:16,432 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,432 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e96bf0fa5736429f992a9a1be7685c19, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=44.7 K 2024-12-09T17:21:16,434 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e96bf0fa5736429f992a9a1be7685c19, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:16,435 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:16,435 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:16,435 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:16,435 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/d4c3169fc0064982aaf0cac49ad58369, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=44.7 K 2024-12-09T17:21:16,436 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d4c3169fc0064982aaf0cac49ad58369, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:16,437 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5c4dba065f547cc83000823df56b469, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733764873898 2024-12-09T17:21:16,438 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e26b76c812f349d2a9ada11001d0d4d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733764873898 2024-12-09T17:21:16,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e510462aa0104cf09b6100ef85c91341, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733764874564 2024-12-09T17:21:16,440 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f7224c5ba914322aba0261243d5c966, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733764874564 2024-12-09T17:21:16,440 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52a6f2d5a0e24ee0ab7c7294e7cb0643, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:16,441 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ef7b2492e2dc49b6aa52328c5b7981bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:16,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:16,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:16,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 
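[editor's note] The compaction entries above show the selection step: each store now holds four flushed files (about 44.7 K total), and the exploring policy picks all four as one minor compaction after checking a handful of contiguous windows against a size-ratio rule. The simplified Java sketch below only illustrates that kind of ratio-based window selection; the real ExploringCompactionPolicy applies more rules (min/max file counts, off-peak ratios, blocking limits), and the sizes in main are approximations taken from the log.

    import java.util.ArrayList;
    import java.util.List;

    public class CompactionSelectionExample {

        /**
         * Pick a contiguous window of store-file sizes in which no file is more than
         * `ratio` times the combined size of the others, preferring more files and,
         * on ties, the cheaper window. Sizes are in bytes, oldest file first.
         */
        static List<Long> select(List<Long> sizes, double ratio, int minFiles) {
            List<Long> best = new ArrayList<>();
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= sizes.size(); end++) {
                    List<Long> window = sizes.subList(start, end);
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    boolean ratioOk = window.stream().allMatch(s -> s <= ratio * (total - s));
                    if (!ratioOk) {
                        continue;
                    }
                    if (window.size() > best.size()
                        || (window.size() == best.size() && total < bestTotal)) {
                        best = new ArrayList<>(window);
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Roughly the four flushed files per store above: ~11.8 K, ~11.7 K, 9.4 K, ~11.7 K.
            List<Long> sizes = List.of(12084L, 11985L, 9657L, 12001L);
            List<Long> chosen = select(sizes, 1.2, 2);
            System.out.println("compacting " + chosen.size() + " files, total "
                + chosen.stream().mapToLong(Long::longValue).sum() + " bytes");
        }
    }

In the log, the same four-file choice is made independently for store A by the shortCompactions thread and for store B by the longCompactions thread, with store C queued just behind them.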
2024-12-09T17:21:16,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:16,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-09T17:21:16,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:16,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:16,458 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:16,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:16,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:16,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:16,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/60fdf6be160b42069c1ce2d04609f481 is 50, key is test_row_0/A:col10/1733764876454/Put/seqid=0 2024-12-09T17:21:16,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764936465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764936480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764936480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764936484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,498 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:16,499 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/f67a84b8ce0b4f0a9630bd4a7e572023 is 50, key is test_row_0/A:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:16,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764936502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,511 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#23 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:16,512 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/eb9a1003d3194cd8a97e8cb91ba10ff2 is 50, key is test_row_0/B:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:16,516 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
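The RegionTooBusyException warnings above are the region server pushing back on writers: the region's memstore usage is over the 512 KB blocking limit reported in the exception while the flush started at 17:21:16,455 is still running, so each Mutate RPC is rejected and the caller is expected to back off and retry (the stock HBase client does this automatically under its configured retry settings). Below is a minimal standalone sketch of that retry behaviour, using the table and column family from this test; the attempt count, backoff values, and cell value are arbitrary assumptions, not part of the test.

// Illustrative client-side handling of RegionTooBusyException with backoff.
// The standard HBase client already retries this internally; this sketch only
// makes that behaviour explicit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // dummy value
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is above its memstore blocking limit; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}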
2024-12-09T17:21:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741861_1037 (size=12241) 2024-12-09T17:21:16,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741860_1036 (size=16731) 2024-12-09T17:21:16,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741862_1038 (size=12241) 2024-12-09T17:21:16,569 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/f67a84b8ce0b4f0a9630bd4a7e572023 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/f67a84b8ce0b4f0a9630bd4a7e572023 2024-12-09T17:21:16,585 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into f67a84b8ce0b4f0a9630bd4a7e572023(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:16,585 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:16,585 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=12, startTime=1733764876427; duration=0sec 2024-12-09T17:21:16,585 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:16,585 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:16,586 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:16,595 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:16,595 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:16,595 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,596 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ba8cb8f160224447a4631c6642b87384, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=44.7 K 2024-12-09T17:21:16,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764936588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764936589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764936589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764936593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,600 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba8cb8f160224447a4631c6642b87384, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733764873702 2024-12-09T17:21:16,601 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/eb9a1003d3194cd8a97e8cb91ba10ff2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/eb9a1003d3194cd8a97e8cb91ba10ff2 2024-12-09T17:21:16,603 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 253e2c9adf4843c6911d2695264740ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733764873898 2024-12-09T17:21:16,608 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting be02eed04a68446f9526ad5401f152cc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733764874564 2024-12-09T17:21:16,611 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c757cae816a4d808c76438df8888863, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:16,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764936608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,622 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into eb9a1003d3194cd8a97e8cb91ba10ff2(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:16,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:16,622 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=12, startTime=1733764876429; duration=0sec 2024-12-09T17:21:16,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:16,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:16,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:16,658 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#24 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:16,659 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/8882a91221304dd3b064646cc45d78f0 is 50, key is test_row_0/C:col10/1733764875756/Put/seqid=0 2024-12-09T17:21:16,673 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
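The PressureAwareThroughputController lines above show compaction I/O being throttled against a global limit (50 MB/s here); both compactions finished well below it, so the controller never had to sleep. The sketch below shows how that limit can be tuned from a Configuration object; the property names are the ones I recall for the pressure-aware compaction controller in HBase 2.x and should be treated as assumptions to verify against the version in use.

// Sketch: tuning the compaction throughput limit behind the
// "PressureAwareThroughputController ... total limit is 50.00 MB/second" lines above.
// Property names are assumptions -- verify against the HBase version in use.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower/upper bounds of the adaptive limit, in bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("lower bound = "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1) + " B/s");
  }
}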
2024-12-09T17:21:16,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741863_1039 (size=12241) 2024-12-09T17:21:16,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764936797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764936802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764936804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764936804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:16,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764936815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:16,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/60fdf6be160b42069c1ce2d04609f481 2024-12-09T17:21:16,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e5266ca72d464d39b12e1434dc67f924 is 50, key is test_row_0/B:col10/1733764876454/Put/seqid=0 2024-12-09T17:21:16,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:16,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:16,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:16,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:16,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:16,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741864_1040 (size=12051) 2024-12-09T17:21:16,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:16,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e5266ca72d464d39b12e1434dc67f924 2024-12-09T17:21:17,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/d126935431034bf5a2d4177cf878ceef is 50, key is test_row_0/C:col10/1733764876454/Put/seqid=0 2024-12-09T17:21:17,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741865_1041 (size=12051) 2024-12-09T17:21:17,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/d126935431034bf5a2d4177cf878ceef 2024-12-09T17:21:17,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/60fdf6be160b42069c1ce2d04609f481 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481 2024-12-09T17:21:17,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481, entries=250, sequenceid=129, filesize=16.3 K 2024-12-09T17:21:17,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e5266ca72d464d39b12e1434dc67f924 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924 2024-12-09T17:21:17,105 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/8882a91221304dd3b064646cc45d78f0 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8882a91221304dd3b064646cc45d78f0 2024-12-09T17:21:17,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924, entries=150, sequenceid=129, filesize=11.8 K 2024-12-09T17:21:17,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/d126935431034bf5a2d4177cf878ceef as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef 2024-12-09T17:21:17,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764937106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764937109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764937112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764937112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,119 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 8882a91221304dd3b064646cc45d78f0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:17,119 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:17,119 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=12, startTime=1733764876443; duration=0sec 2024-12-09T17:21:17,119 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:17,119 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:17,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764937119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef, entries=150, sequenceid=129, filesize=11.8 K 2024-12-09T17:21:17,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 326764652e67b313fc217edc01a9dfcb in 674ms, sequenceid=129, compaction requested=false 2024-12-09T17:21:17,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:17,138 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-09T17:21:17,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:17,140 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-09T17:21:17,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:17,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:17,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:17,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/5990d6fa0dc8491fbfebb457b678e3ef is 50, key is test_row_0/A:col10/1733764876465/Put/seqid=0 2024-12-09T17:21:17,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:17,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741866_1042 (size=12151) 2024-12-09T17:21:17,184 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/5990d6fa0dc8491fbfebb457b678e3ef 2024-12-09T17:21:17,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/13379488dd7a49668c31cbeb09aab1cd is 50, key is test_row_0/B:col10/1733764876465/Put/seqid=0 2024-12-09T17:21:17,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741867_1043 (size=12151) 2024-12-09T17:21:17,236 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=141 (bloomFilter=true), 
to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/13379488dd7a49668c31cbeb09aab1cd 2024-12-09T17:21:17,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/823b7307e8b947a0832c35088d38933f is 50, key is test_row_0/C:col10/1733764876465/Put/seqid=0 2024-12-09T17:21:17,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741868_1044 (size=12151) 2024-12-09T17:21:17,269 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/823b7307e8b947a0832c35088d38933f 2024-12-09T17:21:17,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/5990d6fa0dc8491fbfebb457b678e3ef as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef 2024-12-09T17:21:17,296 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef, entries=150, sequenceid=141, filesize=11.9 K 2024-12-09T17:21:17,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/13379488dd7a49668c31cbeb09aab1cd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd 2024-12-09T17:21:17,310 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd, entries=150, sequenceid=141, filesize=11.9 K 2024-12-09T17:21:17,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/823b7307e8b947a0832c35088d38933f as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f 2024-12-09T17:21:17,335 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f, entries=150, sequenceid=141, filesize=11.9 K 2024-12-09T17:21:17,337 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 326764652e67b313fc217edc01a9dfcb in 198ms, sequenceid=141, compaction requested=true 2024-12-09T17:21:17,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:17,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:17,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-09T17:21:17,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-09T17:21:17,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-09T17:21:17,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2900 sec 2024-12-09T17:21:17,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.3030 sec 2024-12-09T17:21:17,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:17,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:21:17,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:17,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:17,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:17,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:17,682 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2f293c21ebef47c5a1003d5c02699b3c is 50, key is test_row_0/A:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741869_1045 (size=14537) 2024-12-09T17:21:17,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2f293c21ebef47c5a1003d5c02699b3c 2024-12-09T17:21:17,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/98e0e95fbd8c44378e47a09e8c90a814 is 50, key is test_row_0/B:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:17,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741870_1046 (size=9757) 2024-12-09T17:21:17,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764937716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764937718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764937769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764937770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764937769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764937870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764937873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764937876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764937877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764937877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764938074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764938078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764938083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764938083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764938083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/98e0e95fbd8c44378e47a09e8c90a814 2024-12-09T17:21:18,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/704caaeb334c4b11af33cf66cb7ff4d5 is 50, key is test_row_0/C:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:18,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-09T17:21:18,159 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-09T17:21:18,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-09T17:21:18,167 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:18,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:18,168 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:18,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:18,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741871_1047 (size=9757) 
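The repeated RegionTooBusyException stack traces above are back-pressure from HRegion.checkResources: once the region's memstore passes its blocking limit (512.0 K in this run), incoming Mutate calls are rejected until the pending flush drains the memstore, and callers are expected to back off and retry. A minimal, hypothetical client-side sketch of such a retry loop follows; the table name and column family echo the test, but the row, value, and backoff numbers are illustrative only, and in practice the HBase client also retries these failures internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long pauseMs = 100; // illustrative starting backoff, not taken from this test
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put); // the client may also surface this wrapped in a retries-exhausted exception
                    break;
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for the flush to drain it, then retry.
                    Thread.sleep(pauseMs);
                    pauseMs = Math.min(pauseMs * 2, 5_000);
                }
            }
        }
    }
}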
2024-12-09T17:21:18,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/704caaeb334c4b11af33cf66cb7ff4d5 2024-12-09T17:21:18,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2f293c21ebef47c5a1003d5c02699b3c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c 2024-12-09T17:21:18,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c, entries=200, sequenceid=153, filesize=14.2 K 2024-12-09T17:21:18,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/98e0e95fbd8c44378e47a09e8c90a814 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814 2024-12-09T17:21:18,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814, entries=100, sequenceid=153, filesize=9.5 K 2024-12-09T17:21:18,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/704caaeb334c4b11af33cf66cb7ff4d5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5 2024-12-09T17:21:18,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5, entries=100, sequenceid=153, filesize=9.5 K 2024-12-09T17:21:18,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 326764652e67b313fc217edc01a9dfcb in 570ms, sequenceid=153, compaction requested=true 2024-12-09T17:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:18,245 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:18,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:18,245 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:18,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:18,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:18,248 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:18,248 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:18,248 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:18,248 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/f67a84b8ce0b4f0a9630bd4a7e572023, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=54.4 K 2024-12-09T17:21:18,248 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46200 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:18,249 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:18,249 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:18,249 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/eb9a1003d3194cd8a97e8cb91ba10ff2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=45.1 K 2024-12-09T17:21:18,249 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f67a84b8ce0b4f0a9630bd4a7e572023, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:18,250 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting eb9a1003d3194cd8a97e8cb91ba10ff2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:18,250 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60fdf6be160b42069c1ce2d04609f481, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=129, 
earliestPutTs=1733764875822 2024-12-09T17:21:18,250 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e5266ca72d464d39b12e1434dc67f924, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733764875826 2024-12-09T17:21:18,251 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 13379488dd7a49668c31cbeb09aab1cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733764876464 2024-12-09T17:21:18,251 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5990d6fa0dc8491fbfebb457b678e3ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733764876464 2024-12-09T17:21:18,252 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 98e0e95fbd8c44378e47a09e8c90a814, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764877624 2024-12-09T17:21:18,253 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f293c21ebef47c5a1003d5c02699b3c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764877624 2024-12-09T17:21:18,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:18,273 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#33 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:18,274 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/d0b4ee066873457794ee2810af08e553 is 50, key is test_row_0/A:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:18,286 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:18,286 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/71813fb9209a4706b46cee2c6d29a8a8 is 50, key is test_row_0/B:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:18,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741872_1048 (size=12527) 2024-12-09T17:21:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741873_1049 (size=12527) 2024-12-09T17:21:18,321 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-09T17:21:18,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:18,322 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:18,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:18,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac2677149b6a4e6dac6c3c85a5d951cc is 50, key is test_row_0/A:col10/1733764877718/Put/seqid=0 2024-12-09T17:21:18,338 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/71813fb9209a4706b46cee2c6d29a8a8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/71813fb9209a4706b46cee2c6d29a8a8 2024-12-09T17:21:18,348 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 71813fb9209a4706b46cee2c6d29a8a8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:18,348 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:18,348 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=12, startTime=1733764878245; duration=0sec 2024-12-09T17:21:18,348 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:18,348 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:18,348 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:18,351 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46200 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:18,351 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:18,351 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
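Each completed flush adds one HFile per store, and once four eligible files accumulate the ExploringCompactionPolicy selects them for a minor compaction, as seen here for stores A, B and C ("4 eligible, 16 blocking"). The thresholds behind that selection are ordinary configuration; the sketch below is a hypothetical illustration using standard HBase property names, with default values rather than whatever this test actually set.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound on the number of files merged in a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Once a store reaches this many files, further flushes are delayed until
        // compaction catches up (the "16 blocking" seen in the selection log lines).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Memstore flush size; the blocking limit quoted in the RegionTooBusyException
        // messages is this value times hbase.hregion.memstore.block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        System.out.println("compaction threshold = "
                + conf.getInt("hbase.hstore.compactionThreshold", -1));
    }
}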
2024-12-09T17:21:18,351 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8882a91221304dd3b064646cc45d78f0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=45.1 K 2024-12-09T17:21:18,352 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8882a91221304dd3b064646cc45d78f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733764875743 2024-12-09T17:21:18,353 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d126935431034bf5a2d4177cf878ceef, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733764875826 2024-12-09T17:21:18,353 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 823b7307e8b947a0832c35088d38933f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733764876464 2024-12-09T17:21:18,354 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 704caaeb334c4b11af33cf66cb7ff4d5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764877624 2024-12-09T17:21:18,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741874_1050 (size=12151) 2024-12-09T17:21:18,373 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:18,374 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/dbf90c7e784841e2bc8d544808b0b927 is 50, key is test_row_0/C:col10/1733764877624/Put/seqid=0 2024-12-09T17:21:18,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
as already flushing 2024-12-09T17:21:18,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:18,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741875_1051 (size=12527) 2024-12-09T17:21:18,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764938404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764938405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764938405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,415 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/dbf90c7e784841e2bc8d544808b0b927 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/dbf90c7e784841e2bc8d544808b0b927 2024-12-09T17:21:18,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764938409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764938409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,426 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into dbf90c7e784841e2bc8d544808b0b927(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:18,426 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:18,426 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=12, startTime=1733764878245; duration=0sec 2024-12-09T17:21:18,427 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:18,427 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:18,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:18,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764938517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764938522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,722 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/d0b4ee066873457794ee2810af08e553 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d0b4ee066873457794ee2810af08e553 2024-12-09T17:21:18,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764938723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764938726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,739 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into d0b4ee066873457794ee2810af08e553(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:18,739 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:18,739 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=12, startTime=1733764878245; duration=0sec 2024-12-09T17:21:18,740 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:18,740 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:18,771 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac2677149b6a4e6dac6c3c85a5d951cc 2024-12-09T17:21:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:18,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/f06160a2a93a4f4aa1893be963013d40 is 50, key is test_row_0/B:col10/1733764877718/Put/seqid=0 2024-12-09T17:21:18,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741876_1052 (size=12151) 2024-12-09T17:21:18,793 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/f06160a2a93a4f4aa1893be963013d40 2024-12-09T17:21:18,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4dfa444a673e4d0ab0043405e3088edc is 50, key is test_row_0/C:col10/1733764877718/Put/seqid=0 2024-12-09T17:21:18,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741877_1053 (size=12151) 2024-12-09T17:21:18,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764938913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764938914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:18,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764938916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764939026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764939029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,223 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4dfa444a673e4d0ab0043405e3088edc 2024-12-09T17:21:19,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:19,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac2677149b6a4e6dac6c3c85a5d951cc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc 2024-12-09T17:21:19,292 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc, entries=150, sequenceid=178, filesize=11.9 K 2024-12-09T17:21:19,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/f06160a2a93a4f4aa1893be963013d40 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40 2024-12-09T17:21:19,307 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40, entries=150, sequenceid=178, filesize=11.9 K 2024-12-09T17:21:19,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4dfa444a673e4d0ab0043405e3088edc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc 2024-12-09T17:21:19,320 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc, entries=150, sequenceid=178, filesize=11.9 K 2024-12-09T17:21:19,322 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 326764652e67b313fc217edc01a9dfcb in 1000ms, sequenceid=178, compaction requested=false 2024-12-09T17:21:19,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:19,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:19,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-09T17:21:19,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-09T17:21:19,327 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-09T17:21:19,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1570 sec 2024-12-09T17:21:19,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.1640 sec 2024-12-09T17:21:19,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:19,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:21:19,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:19,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:19,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:19,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2424ed53ab8043b887c27bf8447f6c46 is 50, key is test_row_0/A:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:19,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764939592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764939592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741878_1054 (size=12151) 2024-12-09T17:21:19,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764939698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764939698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764939902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764939903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764939918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764939920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:19,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764939923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2424ed53ab8043b887c27bf8447f6c46 2024-12-09T17:21:20,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e996e4f753714b74b81760ac81dfa938 is 50, key is test_row_0/B:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:20,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741879_1055 (size=12151) 2024-12-09T17:21:20,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e996e4f753714b74b81760ac81dfa938 2024-12-09T17:21:20,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/36489855f21240248937199d6d19ed3c is 50, key is test_row_0/C:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:20,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741880_1056 (size=12151) 2024-12-09T17:21:20,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/36489855f21240248937199d6d19ed3c 2024-12-09T17:21:20,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/2424ed53ab8043b887c27bf8447f6c46 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46 2024-12-09T17:21:20,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:21:20,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e996e4f753714b74b81760ac81dfa938 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938 2024-12-09T17:21:20,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:21:20,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/36489855f21240248937199d6d19ed3c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c 2024-12-09T17:21:20,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:21:20,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 326764652e67b313fc217edc01a9dfcb in 635ms, sequenceid=193, compaction requested=true 2024-12-09T17:21:20,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:20,168 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:20,168 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:20,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:20,170 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:20,170 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:20,171 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,171 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/71813fb9209a4706b46cee2c6d29a8a8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.0 K 2024-12-09T17:21:20,171 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:20,172 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:20,172 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71813fb9209a4706b46cee2c6d29a8a8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764876464 2024-12-09T17:21:20,172 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
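The repeated RegionTooBusyException entries above are raised by HRegion.checkResources once the region's memstore passes its blocking limit (reported here as 512.0 K). As a rough, hypothetical sketch of where that figure comes from: the blocking limit is the per-region flush size multiplied by the block multiplier, so a test configuration along the following lines would yield a 512 KB ceiling. The property names are the standard HBase keys, but the specific values are an assumption, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical configuration sketch (values assumed, not read from this test run).
    // HBase rejects writes to a region with RegionTooBusyException once its memstore
    // exceeds hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Assumed values: a 128 KB flush size with the default multiplier of 4
            // gives the 512 KB blocking limit reported in the log above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Region blocking limit = " + (flushSize * multiplier) + " bytes");
        }
    }

Under such settings, clients that hit the limit (as the Mutate calls rejected above do) are expected to back off and retry once the in-flight flushes and compactions shown in this log free memstore space.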
2024-12-09T17:21:20,172 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d0b4ee066873457794ee2810af08e553, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.0 K 2024-12-09T17:21:20,174 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f06160a2a93a4f4aa1893be963013d40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733764877717 2024-12-09T17:21:20,175 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e996e4f753714b74b81760ac81dfa938, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733764878391 2024-12-09T17:21:20,175 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d0b4ee066873457794ee2810af08e553, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764876464 2024-12-09T17:21:20,176 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ac2677149b6a4e6dac6c3c85a5d951cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733764877717 2024-12-09T17:21:20,176 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2424ed53ab8043b887c27bf8447f6c46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733764878391 2024-12-09T17:21:20,201 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:20,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/babbcb91ab024608b14c4b740712e24c is 50, key is test_row_0/A:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:20,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741881_1057 (size=12629) 2024-12-09T17:21:20,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:20,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-09T17:21:20,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:20,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:20,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:20,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:20,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:20,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:20,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/a5b828d58f234303ac6b46f8bd365f53 is 50, key is test_row_0/A:col10/1733764880206/Put/seqid=0 2024-12-09T17:21:20,230 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/babbcb91ab024608b14c4b740712e24c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/babbcb91ab024608b14c4b740712e24c 2024-12-09T17:21:20,231 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:20,232 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/100bd26c7ec9451db4589394c615521e is 50, key is test_row_0/B:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:20,240 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into babbcb91ab024608b14c4b740712e24c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:20,241 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:20,241 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764880167; duration=0sec 2024-12-09T17:21:20,241 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:20,241 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:20,241 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:20,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764940239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,243 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:20,243 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:20,243 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,244 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/dbf90c7e784841e2bc8d544808b0b927, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.0 K 2024-12-09T17:21:20,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764940240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,245 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting dbf90c7e784841e2bc8d544808b0b927, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733764876464 2024-12-09T17:21:20,246 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dfa444a673e4d0ab0043405e3088edc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733764877717 2024-12-09T17:21:20,247 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 36489855f21240248937199d6d19ed3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733764878391 2024-12-09T17:21:20,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741882_1058 (size=12629) 2024-12-09T17:21:20,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-09T17:21:20,274 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-09T17:21:20,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:20,279 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#45 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:20,279 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/0c304ca6e44441d49bf532ae94320482 is 50, key is test_row_0/C:col10/1733764878391/Put/seqid=0 2024-12-09T17:21:20,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-09T17:21:20,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741883_1059 (size=12151) 2024-12-09T17:21:20,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T17:21:20,282 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:20,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/a5b828d58f234303ac6b46f8bd365f53 2024-12-09T17:21:20,283 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:20,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:20,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741884_1060 (size=12629) 2024-12-09T17:21:20,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ae22810cd670408dab3b99625cbd28a8 is 50, key is test_row_0/B:col10/1733764880206/Put/seqid=0 2024-12-09T17:21:20,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741885_1061 (size=12151) 2024-12-09T17:21:20,331 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/0c304ca6e44441d49bf532ae94320482 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0c304ca6e44441d49bf532ae94320482 2024-12-09T17:21:20,340 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 
326764652e67b313fc217edc01a9dfcb into 0c304ca6e44441d49bf532ae94320482(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:20,341 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:20,341 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764880168; duration=0sec 2024-12-09T17:21:20,341 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:20,341 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:20,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764940344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764940346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T17:21:20,445 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T17:21:20,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:20,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764940547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764940549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T17:21:20,599 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T17:21:20,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:20,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
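The pid=21 entries above repeat one pattern: the master dispatches FlushRegionCallable to the region server, the server finds a flush already running for 326764652e67b313fc217edc01a9dfcb, logs "NOT flushing ... as already flushing", and reports the procedure as failed, after which the master dispatches it again until the in-flight flush drains. A minimal sketch of that re-dispatch loop, with hypothetical names and timings; this is a simplified model of the behaviour visible in the log, not the FlushRegionProcedure implementation.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushRetrySketch {
    // Stand-in for the region server side: refuse if a flush is already running.
    static final AtomicBoolean flushing = new AtomicBoolean(false);

    static void flushOnce(String region) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            throw new IOException("Unable to complete flush " + region + " (already flushing)");
        }
        try {
            // ... write memstore snapshots to .tmp files and commit them ...
        } finally {
            flushing.set(false);
        }
    }

    // Stand-in for the master side: re-dispatch with backoff until the server accepts.
    static void runFlushProcedure(String region) throws InterruptedException {
        long backoffMs = 100;                           // illustrative, not an HBase default
        while (true) {
            try {
                flushOnce(region);
                return;                                 // "Remote procedure done"
            } catch (IOException alreadyFlushing) {
                Thread.sleep(backoffMs);                // an earlier flush still owns the region
                backoffMs = Math.min(backoffMs * 2, 5_000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        flushing.set(true);                             // simulate the MemStoreFlusher's in-flight flush
        new Thread(() -> { sleepQuietly(300); flushing.set(false); }).start();
        runFlushProcedure("326764652e67b313fc217edc01a9dfcb");
        System.out.println("flush procedure finished after retries");
    }

    static void sleepQuietly(long ms) {
        try { Thread.sleep(ms); } catch (InterruptedException ignored) { }
    }
}

The repeated "Remote procedure failed, pid=21" lines are therefore expected noise here: each failed dispatch is just a retry that arrived before the MemStoreFlusher finished.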
2024-12-09T17:21:20,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,677 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/100bd26c7ec9451db4589394c615521e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/100bd26c7ec9451db4589394c615521e 2024-12-09T17:21:20,690 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 100bd26c7ec9451db4589394c615521e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:20,690 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:20,690 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764880168; duration=0sec 2024-12-09T17:21:20,691 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:20,691 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:20,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ae22810cd670408dab3b99625cbd28a8 2024-12-09T17:21:20,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6b5377a4d72e4845a6f9ccf009e0bb35 is 50, key is test_row_0/C:col10/1733764880206/Put/seqid=0 2024-12-09T17:21:20,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741886_1062 (size=12151) 2024-12-09T17:21:20,752 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T17:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
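The HRegionFileSystem(442) "Committing .../.tmp/B/100bd26c... as .../B/100bd26c..." lines above show how both compaction and flush output reach a store: the new HFile is written under the region's .tmp directory and only renamed into the column-family directory once it is complete, so readers never observe a partial file. Below is a minimal local-filesystem sketch of that write-then-rename commit; java.nio.file stands in for HDFS and all paths and names are illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
    static Path commitStoreFile(Path regionDir, String family, String fileName, byte[] contents)
            throws IOException {
        Path tmpDir = regionDir.resolve(".tmp").resolve(family);
        Path storeDir = regionDir.resolve(family);
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents);                       // build the file off to the side

        Path committed = storeDir.resolve(fileName);
        // The rename is the commit point; until it happens the store still
        // serves reads from the old files only.
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region-326764652e67b313fc217edc01a9dfcb");
        Path f = commitStoreFile(regionDir, "B", "100bd26c7ec9451db4589394c615521e", new byte[]{1, 2, 3});
        System.out.println("committed " + f);
    }
}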
2024-12-09T17:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
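The recurring RegionTooBusyException WARNs in this run originate in the resource check at HRegion.checkResources (HRegion.java:5067 in these traces): while the region's memstore is above its blocking limit (512.0 K here, deliberately small for TestAcidGuarantees), incoming Mutate RPCs are rejected immediately rather than queued behind the flush. A simplified, hypothetical model of such an admission check follows; it is not HBase source, just the shape of the behaviour the log shows.

import java.util.concurrent.atomic.AtomicLong;

public class MemstoreGateSketch {
    // Illustrative blocking limit matching the 512.0 K value in this test run.
    static final long BLOCKING_MEMSTORE_SIZE = 512 * 1024;
    static final AtomicLong memstoreDataSize = new AtomicLong();

    static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
    }

    // Plays the role of HRegion.checkResources(): fail fast while a flush catches up.
    static void checkResources(String regionName) {
        long current = memstoreDataSize.get();
        if (current > BLOCKING_MEMSTORE_SIZE) {
            throw new RegionTooBusy("Over memstore limit=" + BLOCKING_MEMSTORE_SIZE
                + ", regionName=" + regionName + ", current=" + current);
        }
    }

    static void put(String regionName, int cellBytes) {
        checkResources(regionName);                  // may throw, as in the WARN entries
        memstoreDataSize.addAndGet(cellBytes);       // accepted edits grow the memstore
    }

    static void flushed(long bytes) {
        memstoreDataSize.addAndGet(-bytes);          // a finished flush releases the space
    }

    public static void main(String[] args) {
        String region = "326764652e67b313fc217edc01a9dfcb";
        try {
            for (int i = 0; i < 20_000; i++) put(region, 50);   // ~1 MB of 50-byte cells
        } catch (RegionTooBusy e) {
            System.out.println("rejected: " + e.getMessage());
        }
        flushed(memstoreDataSize.get());             // once flushed, writes are accepted again
        put(region, 50);
        System.out.println("accepted after flush, size=" + memstoreDataSize.get());
    }
}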
2024-12-09T17:21:20,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764940851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:20,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764940852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T17:21:20,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:20,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T17:21:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:20,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:20,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
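On the client side, the ipc.CallRunner(138) entries show these rejections being returned to the writers at 172.17.0.2 (callId 111/112 and earlier), and the test's writer threads simply try again once the flush has drained the memstore. Below is a hedged sketch of such a writer using the public HBase client API; the table, family and row names come from this test, the backoff numbers are illustrative, and note that the HBase client also performs its own internal retries, so in practice the exception may surface wrapped in a retries-exhausted error rather than directly as shown.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumed to point at the mini-cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                            // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    return;                                  // write accepted
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore limit; wait for the flush to
                    // catch up before trying again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new IOException("gave up after repeated RegionTooBusyException");
        }
    }
}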
2024-12-09T17:21:20,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:20,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:21,062 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-09T17:21:21,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:21,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:21,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:21,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
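The whole pid=20/pid=22 sequence is driven by explicit flush requests from the test client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees" further down); each request becomes a FlushTableProcedure on the master with a FlushRegionProcedure subprocedure per region. Issuing the same request through the public API is a one-liner; the example below is a hedged sketch that assumes the configuration resolves to this mini-cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();     // must point at the running cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a flush-table procedure on the master (pid=20/22 in this log)
            // and waits for it to finish; every memstore of the table is persisted
            // to new HFiles, which is what makes the subsequent compactions eligible.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}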
2024-12-09T17:21:21,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6b5377a4d72e4845a6f9ccf009e0bb35 2024-12-09T17:21:21,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/a5b828d58f234303ac6b46f8bd365f53 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53 2024-12-09T17:21:21,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53, entries=150, sequenceid=219, filesize=11.9 K 2024-12-09T17:21:21,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/ae22810cd670408dab3b99625cbd28a8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8 2024-12-09T17:21:21,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8, entries=150, sequenceid=219, filesize=11.9 K 2024-12-09T17:21:21,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6b5377a4d72e4845a6f9ccf009e0bb35 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35 2024-12-09T17:21:21,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35, entries=150, sequenceid=219, filesize=11.9 K 2024-12-09T17:21:21,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 326764652e67b313fc217edc01a9dfcb in 972ms, sequenceid=219, compaction requested=false 2024-12-09T17:21:21,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,215 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=21 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,216 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:21,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/999e9ccdbf49454c85e19c5ce48bcf41 is 50, key is test_row_0/A:col10/1733764880235/Put/seqid=0 2024-12-09T17:21:21,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741887_1063 (size=12151) 2024-12-09T17:21:21,234 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/999e9ccdbf49454c85e19c5ce48bcf41 2024-12-09T17:21:21,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 is 50, key is test_row_0/B:col10/1733764880235/Put/seqid=0 2024-12-09T17:21:21,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741888_1064 (size=12151) 2024-12-09T17:21:21,265 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 2024-12-09T17:21:21,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/ff9806278edb4e4ead962f90d1b1018b is 50, key is test_row_0/C:col10/1733764880235/Put/seqid=0 2024-12-09T17:21:21,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741889_1065 (size=12151) 2024-12-09T17:21:21,288 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/ff9806278edb4e4ead962f90d1b1018b 2024-12-09T17:21:21,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/999e9ccdbf49454c85e19c5ce48bcf41 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41 2024-12-09T17:21:21,309 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:21:21,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 2024-12-09T17:21:21,322 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:21:21,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/ff9806278edb4e4ead962f90d1b1018b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b 2024-12-09T17:21:21,331 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:21:21,333 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 326764652e67b313fc217edc01a9dfcb in 117ms, sequenceid=232, compaction requested=true 2024-12-09T17:21:21,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-09T17:21:21,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-09T17:21:21,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-09T17:21:21,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0520 sec 2024-12-09T17:21:21,338 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0590 sec 2024-12-09T17:21:21,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-09T17:21:21,385 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-09T17:21:21,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:21,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-09T17:21:21,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-09T17:21:21,389 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:21,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:21,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:21:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:21,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,392 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:21,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:21,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/0ea9a79d1b2b4f828b2c3948e6583c28 is 50, key is test_row_0/A:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741890_1066 (size=12151) 2024-12-09T17:21:21,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/0ea9a79d1b2b4f828b2c3948e6583c28 2024-12-09T17:21:21,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/a3c851b3906b4224a8368186ed952b12 is 50, key is test_row_0/B:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741891_1067 (size=12151) 2024-12-09T17:21:21,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=244 (bloomFilter=true), 
to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/a3c851b3906b4224a8368186ed952b12 2024-12-09T17:21:21,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764941442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764941443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f760ea9581b94dfeaaf78f80bd0ea9f1 is 50, key is test_row_0/C:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741892_1068 (size=12151) 2024-12-09T17:21:21,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f760ea9581b94dfeaaf78f80bd0ea9f1 2024-12-09T17:21:21,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/0ea9a79d1b2b4f828b2c3948e6583c28 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28 2024-12-09T17:21:21,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28, entries=150, sequenceid=244, filesize=11.9 K 2024-12-09T17:21:21,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/a3c851b3906b4224a8368186ed952b12 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12 2024-12-09T17:21:21,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-09T17:21:21,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12, entries=150, sequenceid=244, filesize=11.9 K 2024-12-09T17:21:21,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f760ea9581b94dfeaaf78f80bd0ea9f1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1 2024-12-09T17:21:21,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1, entries=150, sequenceid=244, filesize=11.9 K 2024-12-09T17:21:21,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 326764652e67b313fc217edc01a9dfcb in 116ms, sequenceid=244, compaction requested=true 2024-12-09T17:21:21,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:21,506 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:21,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:21,506 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:21,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:21,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:21,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:21,508 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49082 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:21,508 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49082 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:21,508 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 
326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:21,508 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:21,508 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,508 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,508 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/babbcb91ab024608b14c4b740712e24c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=47.9 K 2024-12-09T17:21:21,508 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/100bd26c7ec9451db4589394c615521e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=47.9 K 2024-12-09T17:21:21,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:21,509 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting babbcb91ab024608b14c4b740712e24c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733764878391 2024-12-09T17:21:21,509 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 100bd26c7ec9451db4589394c615521e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, 
earliestPutTs=1733764878391 2024-12-09T17:21:21,509 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5b828d58f234303ac6b46f8bd365f53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733764879554 2024-12-09T17:21:21,510 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ae22810cd670408dab3b99625cbd28a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733764879554 2024-12-09T17:21:21,510 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 999e9ccdbf49454c85e19c5ce48bcf41, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733764880226 2024-12-09T17:21:21,510 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dfd6ae83c254e54ab3d7abeb9bed9b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733764880226 2024-12-09T17:21:21,510 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ea9a79d1b2b4f828b2c3948e6583c28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:21,511 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a3c851b3906b4224a8368186ed952b12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:21,533 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:21,534 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/42f7051588124fdabeb913398e17fb08 is 50, key is test_row_0/A:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,538 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#55 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:21,538 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/496d600e0f724f42a8325e2703f87bbe is 50, key is test_row_0/B:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,545 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-09T17:21:21,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741893_1069 (size=12765) 2024-12-09T17:21:21,546 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:21,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:21,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
as already flushing 2024-12-09T17:21:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:21,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/737504b789e5431e9f55fd48ea5866fa is 50, key is test_row_0/A:col10/1733764881439/Put/seqid=0 2024-12-09T17:21:21,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741894_1070 (size=12765) 2024-12-09T17:21:21,562 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/42f7051588124fdabeb913398e17fb08 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/42f7051588124fdabeb913398e17fb08 2024-12-09T17:21:21,573 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into 42f7051588124fdabeb913398e17fb08(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:21,573 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,573 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=12, startTime=1733764881506; duration=0sec 2024-12-09T17:21:21,574 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:21,574 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:21,574 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:21,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764941573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764941574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,580 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49082 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:21,580 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:21,580 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:21,580 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0c304ca6e44441d49bf532ae94320482, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=47.9 K 2024-12-09T17:21:21,581 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c304ca6e44441d49bf532ae94320482, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733764878391 2024-12-09T17:21:21,581 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b5377a4d72e4845a6f9ccf009e0bb35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733764879554 2024-12-09T17:21:21,582 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff9806278edb4e4ead962f90d1b1018b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733764880226 2024-12-09T17:21:21,582 DEBUG 
[RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f760ea9581b94dfeaaf78f80bd0ea9f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:21,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741895_1071 (size=12301) 2024-12-09T17:21:21,598 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/737504b789e5431e9f55fd48ea5866fa 2024-12-09T17:21:21,609 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:21,610 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/81eda911d1ed4cb382dd5989d687a705 is 50, key is test_row_0/C:col10/1733764881388/Put/seqid=0 2024-12-09T17:21:21,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2cb14648c49046168191683e6ee1c686 is 50, key is test_row_0/B:col10/1733764881439/Put/seqid=0 2024-12-09T17:21:21,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741897_1073 (size=12301) 2024-12-09T17:21:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741896_1072 (size=12765) 2024-12-09T17:21:21,634 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/81eda911d1ed4cb382dd5989d687a705 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/81eda911d1ed4cb382dd5989d687a705 2024-12-09T17:21:21,644 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 81eda911d1ed4cb382dd5989d687a705(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
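[editorial note] The repeated RegionTooBusyException entries in this stretch of the log show the region server rejecting Mutate calls while the region's memstore sits above its blocking limit (reported here as 512.0 K, typically hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), while the client-side RpcRetryingCallerImpl keeps retrying until a flush catches up or the retry budget runs out. Below is a minimal illustrative sketch, not part of AcidGuaranteesTestTool: the table, row, family, and qualifier names are taken from the log, the class name and backoff numbers are invented for illustration, and it assumes a standard HBase 2.x client where the built-in retries are turned down so the server's exception surfaces to the caller.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Turn the built-in retry loop down so the server's "too busy" signal
        // reaches application code instead of being absorbed by RpcRetryingCallerImpl.
        conf.setInt("hbase.client.retries.number", 1);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;                       // arbitrary starting delay
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                          // rejected while memstore is over the blocking limit
              return;                                  // write accepted
            } catch (RegionTooBusyException | RetriesExhaustedException e) {
              Thread.sleep(backoffMs);                 // give flushes/compactions time to catch up
              backoffMs *= 2;                          // simple exponential backoff
            }
          }
          throw new IOException("region still too busy after backoff");
        }
      }
    }

With the client retries left at their defaults (as in this test run), the same backoff happens inside the HBase client itself, which is why the log shows "Call exception, tries=6, retries=16" rather than application-level failures.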
2024-12-09T17:21:21,644 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,644 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=12, startTime=1733764881507; duration=0sec 2024-12-09T17:21:21,644 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:21,644 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:21,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764941684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764941685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-09T17:21:21,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764941888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764941889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764941923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,926 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:21,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764941938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:21,941 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at 
org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:21,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764941939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:21,942 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, 
server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:21,972 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/496d600e0f724f42a8325e2703f87bbe as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/496d600e0f724f42a8325e2703f87bbe 2024-12-09T17:21:21,984 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 496d600e0f724f42a8325e2703f87bbe(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
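Note: the client-side trace above shows HTable.put being rejected with RegionTooBusyException and retried internally by RpcRetryingCallerImpl (tries=6 of retries=16). A minimal standalone sketch of an equivalent writer follows, for illustration only; the retry settings, payload value, and choice of column family are assumptions, not values read from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry budget; the client in the log above retries up to 16 times.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms; backoff is applied per attempt
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Table.put blocks; a RegionTooBusyException returned by the server is retried
          // inside the client until the retry budget is exhausted, then surfaces as an IOException.
          table.put(put);
        }
      }
    }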
2024-12-09T17:21:21,984 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:21,984 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=12, startTime=1733764881506; duration=0sec 2024-12-09T17:21:21,984 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:21,985 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:21,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-09T17:21:22,022 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2cb14648c49046168191683e6ee1c686 2024-12-09T17:21:22,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b46ba932499041dd85baaa5a658b3065 is 50, key is test_row_0/C:col10/1733764881439/Put/seqid=0 2024-12-09T17:21:22,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741898_1074 (size=12301) 2024-12-09T17:21:22,038 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b46ba932499041dd85baaa5a658b3065 2024-12-09T17:21:22,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/737504b789e5431e9f55fd48ea5866fa as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa 2024-12-09T17:21:22,052 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa, entries=150, sequenceid=268, filesize=12.0 K 2024-12-09T17:21:22,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2cb14648c49046168191683e6ee1c686 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686 2024-12-09T17:21:22,064 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686, entries=150, sequenceid=268, filesize=12.0 K 2024-12-09T17:21:22,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b46ba932499041dd85baaa5a658b3065 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065 2024-12-09T17:21:22,080 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065, entries=150, sequenceid=268, filesize=12.0 K 2024-12-09T17:21:22,082 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 326764652e67b313fc217edc01a9dfcb in 536ms, sequenceid=268, compaction requested=false 2024-12-09T17:21:22,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:22,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
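Note: the flush completed above (pid=23) was driven by a master-side FlushTableProcedure that dispatched a FlushRegionProcedure to the region server. A hedged sketch of how such a flush can be requested from a client through the standard Admin API is below; connection settings are defaults and are not taken from this cluster.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequester {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush the table; in the log this shows up as a
          // FlushTableProcedure with FlushRegionProcedure subprocedures (pid/ppid pairs).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }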
2024-12-09T17:21:22,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-09T17:21:22,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-09T17:21:22,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-09T17:21:22,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 692 msec 2024-12-09T17:21:22,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 702 msec 2024-12-09T17:21:22,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:21:22,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:22,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:22,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:22,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:22,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3dd535cd640041fe9e70b5fcf303ce84 is 50, key is test_row_0/A:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:22,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741899_1075 (size=12301) 2024-12-09T17:21:22,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3dd535cd640041fe9e70b5fcf303ce84 2024-12-09T17:21:22,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c1e1e7dac33f42519870a0e6b5c45417 is 50, key is test_row_0/B:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:22,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764942264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764942271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741900_1076 (size=12301) 2024-12-09T17:21:22,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c1e1e7dac33f42519870a0e6b5c45417 2024-12-09T17:21:22,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b3b1d258f2c340a58c8b14416c2c0bc7 is 50, key is test_row_0/C:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:22,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741901_1077 (size=12301) 2024-12-09T17:21:22,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764942373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764942375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-09T17:21:22,493 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-09T17:21:22,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:22,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-09T17:21:22,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-09T17:21:22,496 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:22,497 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:22,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:22,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764942578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764942581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-09T17:21:22,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-09T17:21:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:22,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
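Note: the repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources once a region's memstore passes its blocking size. A sketch of the two server-side settings that determine that limit is below; the concrete numbers are assumptions, chosen only because 128 KB with the default multiplier of 4 reproduces the 512 KB figure seen in the log, and are not values recovered from the actual test configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: flush size 128 KB times block multiplier 4
        // would yield a 512 KB blocking limit like the one reported above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
      }
    }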
2024-12-09T17:21:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:22,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:22,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b3b1d258f2c340a58c8b14416c2c0bc7 2024-12-09T17:21:22,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3dd535cd640041fe9e70b5fcf303ce84 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84 2024-12-09T17:21:22,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84, entries=150, sequenceid=285, filesize=12.0 K 2024-12-09T17:21:22,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/c1e1e7dac33f42519870a0e6b5c45417 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417 2024-12-09T17:21:22,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417, entries=150, sequenceid=285, filesize=12.0 K 2024-12-09T17:21:22,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/b3b1d258f2c340a58c8b14416c2c0bc7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7 2024-12-09T17:21:22,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7, entries=150, sequenceid=285, filesize=12.0 K 2024-12-09T17:21:22,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 326764652e67b313fc217edc01a9dfcb in 591ms, sequenceid=285, compaction requested=true 2024-12-09T17:21:22,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:22,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:22,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:22,790 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:22,790 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:22,792 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:22,792 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:22,792 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:22,792 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/496d600e0f724f42a8325e2703f87bbe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.5 K 2024-12-09T17:21:22,793 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:22,793 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:22,793 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
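Note: the records above show the ExploringCompactionPolicy selecting 3 store files (37367 bytes total) for minor compactions of the A and B families after the flush. For comparison, a hedged sketch of requesting a compaction explicitly through the Admin API follows; the table and family names come from the log, everything else is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionRequester {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queues a compaction of the B family on the region server; file selection
          // is still performed server-side by the configured compaction policy.
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
        }
      }
    }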
2024-12-09T17:21:22,793 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/42f7051588124fdabeb913398e17fb08, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.5 K 2024-12-09T17:21:22,793 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 496d600e0f724f42a8325e2703f87bbe, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:22,793 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42f7051588124fdabeb913398e17fb08, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:22,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:22,793 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cb14648c49046168191683e6ee1c686, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733764881435 2024-12-09T17:21:22,794 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 737504b789e5431e9f55fd48ea5866fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733764881435 2024-12-09T17:21:22,794 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c1e1e7dac33f42519870a0e6b5c45417, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:22,794 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dd535cd640041fe9e70b5fcf303ce84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:22,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-09T17:21:22,804 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:22,805 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:22,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:22,811 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:22,812 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/5922eb62805e4267b2195d525ccac526 is 50, key is test_row_0/A:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:22,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b3d85fa0083242d79405052fe5fc14bc is 50, key is test_row_0/A:col10/1733764882251/Put/seqid=0 2024-12-09T17:21:22,818 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#65 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:22,819 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6b410e61dc9d400d8e3356768b89fd02 is 50, key is test_row_0/B:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741902_1078 (size=13017) 2024-12-09T17:21:22,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741904_1080 (size=13017) 2024-12-09T17:21:22,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741903_1079 (size=12301) 2024-12-09T17:21:22,858 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b3d85fa0083242d79405052fe5fc14bc 2024-12-09T17:21:22,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5d65c1a8aefe4417a8744151115e13f1 is 50, key is test_row_0/B:col10/1733764882251/Put/seqid=0 2024-12-09T17:21:22,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:22,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:22,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741905_1081 (size=12301) 2024-12-09T17:21:22,900 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5d65c1a8aefe4417a8744151115e13f1 2024-12-09T17:21:22,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f10305c95f594ec6ace64e9174297cd6 is 50, key is test_row_0/C:col10/1733764882251/Put/seqid=0 2024-12-09T17:21:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764942912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764942912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:22,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741906_1082 (size=12301) 2024-12-09T17:21:23,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764943016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764943017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-09T17:21:23,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764943221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764943222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,254 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/5922eb62805e4267b2195d525ccac526 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5922eb62805e4267b2195d525ccac526 2024-12-09T17:21:23,256 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6b410e61dc9d400d8e3356768b89fd02 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6b410e61dc9d400d8e3356768b89fd02 2024-12-09T17:21:23,266 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 6b410e61dc9d400d8e3356768b89fd02(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:23,266 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:23,266 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764882790; duration=0sec 2024-12-09T17:21:23,267 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:23,267 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:23,267 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:23,269 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into 5922eb62805e4267b2195d525ccac526(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:23,269 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:23,270 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764882790; duration=0sec 2024-12-09T17:21:23,270 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:23,270 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:23,270 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:23,270 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:23,270 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:23,270 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/81eda911d1ed4cb382dd5989d687a705, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.5 K 2024-12-09T17:21:23,271 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 81eda911d1ed4cb382dd5989d687a705, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1733764881362 2024-12-09T17:21:23,272 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b46ba932499041dd85baaa5a658b3065, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1733764881435 2024-12-09T17:21:23,272 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b3b1d258f2c340a58c8b14416c2c0bc7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:23,282 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#68 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:23,283 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/cb24957be7fc4ba0b8fb6868203c94b1 is 50, key is test_row_0/C:col10/1733764882196/Put/seqid=0 2024-12-09T17:21:23,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741907_1083 (size=13017) 2024-12-09T17:21:23,330 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/cb24957be7fc4ba0b8fb6868203c94b1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/cb24957be7fc4ba0b8fb6868203c94b1 2024-12-09T17:21:23,335 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=307 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f10305c95f594ec6ace64e9174297cd6 2024-12-09T17:21:23,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b3d85fa0083242d79405052fe5fc14bc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc 2024-12-09T17:21:23,349 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into cb24957be7fc4ba0b8fb6868203c94b1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:23,349 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:23,349 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764882793; duration=0sec 2024-12-09T17:21:23,349 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:23,349 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:23,356 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc, entries=150, sequenceid=307, filesize=12.0 K 2024-12-09T17:21:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5d65c1a8aefe4417a8744151115e13f1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1 2024-12-09T17:21:23,366 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1, entries=150, sequenceid=307, filesize=12.0 K 2024-12-09T17:21:23,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f10305c95f594ec6ace64e9174297cd6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6 2024-12-09T17:21:23,379 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6, entries=150, sequenceid=307, filesize=12.0 K 2024-12-09T17:21:23,382 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 326764652e67b313fc217edc01a9dfcb in 577ms, sequenceid=307, compaction requested=false 2024-12-09T17:21:23,382 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:23,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:23,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-09T17:21:23,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-09T17:21:23,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-09T17:21:23,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 888 msec 2024-12-09T17:21:23,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 896 msec 2024-12-09T17:21:23,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:23,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-09T17:21:23,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:23,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:23,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:23,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:23,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:23,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:23,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ba217eb877ab4cc68ccb90616644bd9f is 50, key is test_row_0/A:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:23,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741908_1084 (size=12301) 2024-12-09T17:21:23,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ba217eb877ab4cc68ccb90616644bd9f 2024-12-09T17:21:23,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764943563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0defed453edd4d5093e5a554cff132db is 50, key is test_row_0/B:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:23,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764943566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741909_1085 (size=12301) 2024-12-09T17:21:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-09T17:21:23,601 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-09T17:21:23,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-09T17:21:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:23,604 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:23,605 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:23,605 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:23,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764943667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764943669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:23,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:23,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:23,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:23,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:23,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:23,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:23,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764943871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:23,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764943871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:23,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:23,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:23,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:23,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:23,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:23,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:23,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:23,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:23,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0defed453edd4d5093e5a554cff132db 2024-12-09T17:21:24,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f868d1b74c614e12898675ca311591a5 is 50, key is test_row_0/C:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:24,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741910_1086 (size=12301) 2024-12-09T17:21:24,062 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:24,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:24,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:24,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764944176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764944176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:24,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:24,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:24,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,368 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:24,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:24,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:24,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:24,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f868d1b74c614e12898675ca311591a5 2024-12-09T17:21:24,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ba217eb877ab4cc68ccb90616644bd9f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f 2024-12-09T17:21:24,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f, entries=150, sequenceid=325, filesize=12.0 K 2024-12-09T17:21:24,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0defed453edd4d5093e5a554cff132db as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db 2024-12-09T17:21:24,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db, entries=150, sequenceid=325, filesize=12.0 K 2024-12-09T17:21:24,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/f868d1b74c614e12898675ca311591a5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5 2024-12-09T17:21:24,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5, entries=150, sequenceid=325, filesize=12.0 K 2024-12-09T17:21:24,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 326764652e67b313fc217edc01a9dfcb in 946ms, sequenceid=325, compaction requested=true 2024-12-09T17:21:24,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:24,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:24,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:24,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:24,472 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:24,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:24,472 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:24,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:24,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:24,473 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:24,473 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:24,473 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:24,473 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:24,473 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,473 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:24,474 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5922eb62805e4267b2195d525ccac526, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.7 K 2024-12-09T17:21:24,474 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6b410e61dc9d400d8e3356768b89fd02, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.7 K 2024-12-09T17:21:24,474 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5922eb62805e4267b2195d525ccac526, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:24,474 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b410e61dc9d400d8e3356768b89fd02, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:24,474 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3d85fa0083242d79405052fe5fc14bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733764882251 2024-12-09T17:21:24,474 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d65c1a8aefe4417a8744151115e13f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733764882251 2024-12-09T17:21:24,475 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0defed453edd4d5093e5a554cff132db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:24,475 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba217eb877ab4cc68ccb90616644bd9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:24,493 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:24,494 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac22e22322b44bd9bf8f67c95ee0f053 is 50, key is test_row_0/A:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:24,495 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#73 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:24,495 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6e38bb7d6cac4fa6a024f7ff378e330d is 50, key is test_row_0/B:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:24,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741912_1088 (size=13119) 2024-12-09T17:21:24,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741911_1087 (size=13119) 2024-12-09T17:21:24,522 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-09T17:21:24,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:24,523 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:24,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:24,528 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac22e22322b44bd9bf8f67c95ee0f053 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac22e22322b44bd9bf8f67c95ee0f053 2024-12-09T17:21:24,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/857979f34c1f4e4bac46b1c69c0919fe is 50, key is test_row_0/A:col10/1733764883564/Put/seqid=0 2024-12-09T17:21:24,535 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into ac22e22322b44bd9bf8f67c95ee0f053(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:24,535 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:24,535 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764884471; duration=0sec 2024-12-09T17:21:24,535 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:24,535 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:24,536 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:24,537 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:24,538 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:24,538 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:24,538 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/cb24957be7fc4ba0b8fb6868203c94b1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.7 K 2024-12-09T17:21:24,539 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb24957be7fc4ba0b8fb6868203c94b1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733764881569 2024-12-09T17:21:24,539 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f10305c95f594ec6ace64e9174297cd6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=307, earliestPutTs=1733764882251 2024-12-09T17:21:24,539 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f868d1b74c614e12898675ca311591a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:24,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38771 is added to blk_1073741913_1089 (size=12301) 2024-12-09T17:21:24,553 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:24,554 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/97111753fa254298a9256fdf155e71bc is 50, key is test_row_0/C:col10/1733764883524/Put/seqid=0 2024-12-09T17:21:24,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741914_1090 (size=13119) 2024-12-09T17:21:24,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:24,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:24,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:24,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764944713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764944715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764944818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764944821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:24,931 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6e38bb7d6cac4fa6a024f7ff378e330d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6e38bb7d6cac4fa6a024f7ff378e330d 2024-12-09T17:21:24,945 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 6e38bb7d6cac4fa6a024f7ff378e330d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:24,945 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:24,945 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764884471; duration=0sec 2024-12-09T17:21:24,945 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:24,946 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:24,952 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/857979f34c1f4e4bac46b1c69c0919fe 2024-12-09T17:21:24,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/62a04eba61f1415cb2bdb2efbe5b25a9 is 50, key is test_row_0/B:col10/1733764883564/Put/seqid=0 2024-12-09T17:21:24,985 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/97111753fa254298a9256fdf155e71bc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/97111753fa254298a9256fdf155e71bc 2024-12-09T17:21:24,996 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 97111753fa254298a9256fdf155e71bc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:24,996 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:24,996 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764884472; duration=0sec 2024-12-09T17:21:24,996 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:24,996 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:25,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741915_1091 (size=12301) 2024-12-09T17:21:25,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764945021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764945025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764945328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764945330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,419 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/62a04eba61f1415cb2bdb2efbe5b25a9 2024-12-09T17:21:25,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6e2f37839cde417ebef695cce01ba2ce is 50, key is test_row_0/C:col10/1733764883564/Put/seqid=0 2024-12-09T17:21:25,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741916_1092 (size=12301) 2024-12-09T17:21:25,450 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6e2f37839cde417ebef695cce01ba2ce 2024-12-09T17:21:25,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/857979f34c1f4e4bac46b1c69c0919fe as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe 2024-12-09T17:21:25,465 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe, entries=150, sequenceid=346, filesize=12.0 K 2024-12-09T17:21:25,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/62a04eba61f1415cb2bdb2efbe5b25a9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9 2024-12-09T17:21:25,486 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9, entries=150, sequenceid=346, filesize=12.0 K 2024-12-09T17:21:25,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/6e2f37839cde417ebef695cce01ba2ce as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce 2024-12-09T17:21:25,497 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce, entries=150, sequenceid=346, filesize=12.0 K 2024-12-09T17:21:25,499 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 326764652e67b313fc217edc01a9dfcb in 977ms, sequenceid=346, compaction requested=false 2024-12-09T17:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-09T17:21:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-09T17:21:25,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-09T17:21:25,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8950 sec 2024-12-09T17:21:25,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.9010 sec 2024-12-09T17:21:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-09T17:21:25,709 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-09T17:21:25,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-09T17:21:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:25,714 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:25,715 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:25,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:25,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:21:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, 
store=B 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:25,840 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:25,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8a3745e2d4b34899b955f04be1ec2a69 is 50, key is test_row_0/A:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:25,868 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-09T17:21:25,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:25,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:25,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:25,869 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:25,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:25,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764945877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764945877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741917_1093 (size=17181) 2024-12-09T17:21:25,885 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8a3745e2d4b34899b955f04be1ec2a69 2024-12-09T17:21:25,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e75e65dfbf1445e6a147cc1a5fe9f215 is 50, key is test_row_0/B:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:25,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741918_1094 (size=12301) 2024-12-09T17:21:25,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e75e65dfbf1445e6a147cc1a5fe9f215 2024-12-09T17:21:25,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36836 deadline: 1733764945940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,945 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:25,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36812 deadline: 1733764945953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36822 deadline: 1733764945955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,958 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8190 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:25,959 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:25,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/aa44a4192be44e0a9f24f65c809a4801 is 50, key is test_row_0/C:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:25,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764945981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764945982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:25,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741919_1095 (size=12301) 2024-12-09T17:21:25,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/aa44a4192be44e0a9f24f65c809a4801 2024-12-09T17:21:26,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/8a3745e2d4b34899b955f04be1ec2a69 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69 2024-12-09T17:21:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:26,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69, entries=250, sequenceid=366, filesize=16.8 K 2024-12-09T17:21:26,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/e75e65dfbf1445e6a147cc1a5fe9f215 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215 2024-12-09T17:21:26,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-09T17:21:26,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:26,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215, entries=150, sequenceid=366, filesize=12.0 K 2024-12-09T17:21:26,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:26,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:26,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:26,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/aa44a4192be44e0a9f24f65c809a4801 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801 2024-12-09T17:21:26,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:26,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801, entries=150, sequenceid=366, filesize=12.0 K 2024-12-09T17:21:26,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 326764652e67b313fc217edc01a9dfcb in 206ms, sequenceid=366, compaction requested=true 2024-12-09T17:21:26,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:26,046 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:26,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:26,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:26,047 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:26,049 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42601 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:26,049 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:26,049 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:26,049 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac22e22322b44bd9bf8f67c95ee0f053, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=41.6 K 2024-12-09T17:21:26,050 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac22e22322b44bd9bf8f67c95ee0f053, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:26,050 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:26,050 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:26,050 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:26,050 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6e38bb7d6cac4fa6a024f7ff378e330d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.8 K 2024-12-09T17:21:26,051 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857979f34c1f4e4bac46b1c69c0919fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733764883559 2024-12-09T17:21:26,051 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a3745e2d4b34899b955f04be1ec2a69, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:26,051 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e38bb7d6cac4fa6a024f7ff378e330d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:26,052 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 62a04eba61f1415cb2bdb2efbe5b25a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733764883559 2024-12-09T17:21:26,052 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e75e65dfbf1445e6a147cc1a5fe9f215, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:26,066 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:26,067 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b877fd552ec747dfbd3ab153b9cd3c90 is 50, key is test_row_0/A:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:26,075 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:26,076 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/77e8aea254cf43c2be96ec2dde0320d0 is 50, key is test_row_0/B:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:26,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741920_1096 (size=13221) 2024-12-09T17:21:26,091 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b877fd552ec747dfbd3ab153b9cd3c90 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b877fd552ec747dfbd3ab153b9cd3c90 2024-12-09T17:21:26,097 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into b877fd552ec747dfbd3ab153b9cd3c90(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:26,097 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:26,097 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764886046; duration=0sec 2024-12-09T17:21:26,097 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:26,097 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:26,097 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:26,099 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:26,100 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:26,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741921_1097 (size=13221) 2024-12-09T17:21:26,100 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:26,100 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/97111753fa254298a9256fdf155e71bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=36.8 K 2024-12-09T17:21:26,101 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97111753fa254298a9256fdf155e71bc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733764882897 2024-12-09T17:21:26,101 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e2f37839cde417ebef695cce01ba2ce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733764883559 2024-12-09T17:21:26,102 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa44a4192be44e0a9f24f65c809a4801, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:26,116 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/77e8aea254cf43c2be96ec2dde0320d0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/77e8aea254cf43c2be96ec2dde0320d0 2024-12-09T17:21:26,127 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 77e8aea254cf43c2be96ec2dde0320d0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:26,127 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:26,127 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764886046; duration=0sec 2024-12-09T17:21:26,127 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:26,127 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:26,139 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#83 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:26,139 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/46637fdfaacc4140b98940fbcb9bff0e is 50, key is test_row_0/C:col10/1733764885837/Put/seqid=0 2024-12-09T17:21:26,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741922_1098 (size=13221) 2024-12-09T17:21:26,187 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-09T17:21:26,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:26,189 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:26,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:26,198 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/46637fdfaacc4140b98940fbcb9bff0e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/46637fdfaacc4140b98940fbcb9bff0e 2024-12-09T17:21:26,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/09a1c70f14b44c4183e0c55b4fc62556 is 50, key is test_row_0/A:col10/1733764885874/Put/seqid=0 2024-12-09T17:21:26,215 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 46637fdfaacc4140b98940fbcb9bff0e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:26,215 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:26,215 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764886047; duration=0sec 2024-12-09T17:21:26,215 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:26,215 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:26,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764946227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764946229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741923_1099 (size=14741) 2024-12-09T17:21:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:26,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764946331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764946333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764946535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764946535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,642 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/09a1c70f14b44c4183e0c55b4fc62556 2024-12-09T17:21:26,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0c6f025fad4847f79b4c6a6b175bd626 is 50, key is test_row_0/B:col10/1733764885874/Put/seqid=0 2024-12-09T17:21:26,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741924_1100 (size=12301) 2024-12-09T17:21:26,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:26,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764946839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:26,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764946841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:27,060 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0c6f025fad4847f79b4c6a6b175bd626 2024-12-09T17:21:27,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/5e24116fd9a14fa7ac81d446974d90e5 is 50, key is test_row_0/C:col10/1733764885874/Put/seqid=0 2024-12-09T17:21:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741925_1101 (size=12301) 2024-12-09T17:21:27,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764947345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:27,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764947346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:27,473 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/5e24116fd9a14fa7ac81d446974d90e5 2024-12-09T17:21:27,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/09a1c70f14b44c4183e0c55b4fc62556 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556 2024-12-09T17:21:27,485 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556, entries=200, sequenceid=388, filesize=14.4 K 2024-12-09T17:21:27,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/0c6f025fad4847f79b4c6a6b175bd626 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626 2024-12-09T17:21:27,494 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626, entries=150, sequenceid=388, filesize=12.0 K 2024-12-09T17:21:27,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/5e24116fd9a14fa7ac81d446974d90e5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5 2024-12-09T17:21:27,507 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5, entries=150, sequenceid=388, filesize=12.0 K 2024-12-09T17:21:27,509 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 326764652e67b313fc217edc01a9dfcb in 1320ms, sequenceid=388, compaction requested=false 2024-12-09T17:21:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-09T17:21:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-09T17:21:27,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-09T17:21:27,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7960 sec 2024-12-09T17:21:27,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.8020 sec 2024-12-09T17:21:27,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-09T17:21:27,819 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-09T17:21:27,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:27,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-09T17:21:27,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-09T17:21:27,822 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:27,822 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:27,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-09T17:21:27,974 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:27,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-09T17:21:27,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:27,975 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:27,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:27,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/d1cae0485945448f97359991100173f6 is 50, key is test_row_0/A:col10/1733764886226/Put/seqid=0 2024-12-09T17:21:27,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741926_1102 (size=12301) 
2024-12-09T17:21:28,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-09T17:21:28,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:28,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:28,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764948372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764948372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,393 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/d1cae0485945448f97359991100173f6 2024-12-09T17:21:28,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/bf8734d6846d4039a83fd61e437a6cb5 is 50, key is test_row_0/B:col10/1733764886226/Put/seqid=0 2024-12-09T17:21:28,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-09T17:21:28,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741927_1103 (size=12301) 2024-12-09T17:21:28,431 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/bf8734d6846d4039a83fd61e437a6cb5 2024-12-09T17:21:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/efa5a3c301434fe3b027441c35d9def1 is 50, key is test_row_0/C:col10/1733764886226/Put/seqid=0 2024-12-09T17:21:28,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741928_1104 (size=12301) 2024-12-09T17:21:28,460 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=405 (bloomFilter=true), 
to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/efa5a3c301434fe3b027441c35d9def1 2024-12-09T17:21:28,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/d1cae0485945448f97359991100173f6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6 2024-12-09T17:21:28,472 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6, entries=150, sequenceid=405, filesize=12.0 K 2024-12-09T17:21:28,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764948475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764948477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/bf8734d6846d4039a83fd61e437a6cb5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5 2024-12-09T17:21:28,487 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5, entries=150, sequenceid=405, filesize=12.0 K 2024-12-09T17:21:28,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/efa5a3c301434fe3b027441c35d9def1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1 2024-12-09T17:21:28,495 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1, entries=150, sequenceid=405, filesize=12.0 K 2024-12-09T17:21:28,496 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 326764652e67b313fc217edc01a9dfcb in 522ms, 
sequenceid=405, compaction requested=true 2024-12-09T17:21:28,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:28,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:28,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-09T17:21:28,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-09T17:21:28,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-09T17:21:28,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 676 msec 2024-12-09T17:21:28,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 680 msec 2024-12-09T17:21:28,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:28,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:28,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:28,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/106a615ac32c48f68ef3cd4fde9270ae is 50, key is test_row_0/A:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:28,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741929_1105 (size=14741) 2024-12-09T17:21:28,701 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/106a615ac32c48f68ef3cd4fde9270ae 2024-12-09T17:21:28,708 
WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764948707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764948707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7754add93b4d4378b67a7909fcd8fa63 is 50, key is test_row_0/B:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:28,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741930_1106 (size=12301) 2024-12-09T17:21:28,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764948810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:28,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764948810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-09T17:21:28,925 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-09T17:21:28,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:28,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-09T17:21:28,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:28,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:28,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:28,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764949012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764949015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:29,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-09T17:21:29,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:29,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
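[Editor's note] The repeated RegionTooBusyException warnings above show writes being rejected by HRegion.checkResources while the region sits over its per-region memstore limit (512 KB in this test setup), and the "as already flushing" path shows why the second flush request (pid=33) cannot make immediate progress. Below is an illustrative retry-with-backoff sketch around a single put; it is not how the test writes data, and in practice the stock HBase client already retries retryable exceptions internally. Table, family and qualifier names are taken from the log; the backoff numbers are arbitrary.

// Illustrative sketch only: an explicit retry loop around a put that may be
// rejected with RegionTooBusyException while the region is over its memstore
// limit (the "Over memstore limit=512.0 K" warnings above). The stock HBase
// client retries such calls itself; this just makes the pattern visible.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  static void putWithRetry(Connection conn, byte[] row) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          Put put = new Put(row)
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is flushing and over its memstore limit; back off and retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new RuntimeException("region still too busy after retries");
    }
  }
}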
2024-12-09T17:21:29,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:29,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:29,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7754add93b4d4378b67a7909fcd8fa63 2024-12-09T17:21:29,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/daabd65757144c24ae25b3ad7ef60163 is 50, key is test_row_0/C:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:29,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741931_1107 (size=12301) 2024-12-09T17:21:29,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/daabd65757144c24ae25b3ad7ef60163 2024-12-09T17:21:29,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/106a615ac32c48f68ef3cd4fde9270ae as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae 2024-12-09T17:21:29,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae, entries=200, sequenceid=427, filesize=14.4 K 2024-12-09T17:21:29,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7754add93b4d4378b67a7909fcd8fa63 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63 2024-12-09T17:21:29,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63, entries=150, sequenceid=427, filesize=12.0 K 2024-12-09T17:21:29,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/daabd65757144c24ae25b3ad7ef60163 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163 2024-12-09T17:21:29,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163, entries=150, sequenceid=427, filesize=12.0 K 2024-12-09T17:21:29,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 326764652e67b313fc217edc01a9dfcb in 484ms, sequenceid=427, compaction requested=true 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:29,175 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:29,175 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:29,175 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:29,177 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:29,177 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:29,177 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
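[Editor's note] The ExploringCompactionPolicy entries here and just below record the minor-compaction selection: four B-store HFiles totalling 50124 bytes (~48.9 K) and four A-store HFiles totalling 55004 bytes (~53.7 K). As a rough illustration of ratio-based selection (deliberately simplified, not HBase's actual ExploringCompactionPolicy), the sketch below accepts a candidate set only if no single file dominates the combined size of the others; the byte sizes are chosen to add up to the 50124 bytes reported for store B, and the ratio is an assumption analogous to hbase.hstore.compaction.ratio.

// Rough illustration only -- not HBase's ExploringCompactionPolicy. It mimics the
// flavor of ratio-based minor-compaction selection: a candidate set is acceptable
// if every file is no larger than `ratio` times the combined size of the others.
import java.util.Arrays;
import java.util.List;

public class RatioSelectionSketch {
  static boolean acceptable(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false; // one file dominates the rest; skip this candidate set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // ~12.9 K + 3 x ~12.0 K, summing to the 50124 bytes selected for store B above.
    List<Long> candidate = Arrays.asList(13221L, 12301L, 12301L, 12301L);
    System.out.println("select for minor compaction? "
        + acceptable(candidate, 1.2 /* assumed ratio */));
  }
}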
2024-12-09T17:21:29,177 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/77e8aea254cf43c2be96ec2dde0320d0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=48.9 K 2024-12-09T17:21:29,177 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:29,177 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:29,177 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:29,177 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 77e8aea254cf43c2be96ec2dde0320d0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:29,177 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b877fd552ec747dfbd3ab153b9cd3c90, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=53.7 K 2024-12-09T17:21:29,178 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b877fd552ec747dfbd3ab153b9cd3c90, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:29,178 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c6f025fad4847f79b4c6a6b175bd626, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, 
earliestPutTs=1733764885869 2024-12-09T17:21:29,179 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09a1c70f14b44c4183e0c55b4fc62556, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733764885869 2024-12-09T17:21:29,179 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting bf8734d6846d4039a83fd61e437a6cb5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733764886221 2024-12-09T17:21:29,179 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1cae0485945448f97359991100173f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733764886221 2024-12-09T17:21:29,179 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7754add93b4d4378b67a7909fcd8fa63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888369 2024-12-09T17:21:29,180 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 106a615ac32c48f68ef3cd4fde9270ae, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888358 2024-12-09T17:21:29,200 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#93 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:29,200 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/4e166b6bb4d341b1afa4077922bfd791 is 50, key is test_row_0/A:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:29,207 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:29,208 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/f7a18a0ee252446ca4f69916666ad9bd is 50, key is test_row_0/B:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:29,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741932_1108 (size=13357) 2024-12-09T17:21:29,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741933_1109 (size=13357) 2024-12-09T17:21:29,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:29,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-09T17:21:29,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:29,233 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:29,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/71f01b684c62499d827668fe1a7a25de is 50, key is test_row_0/A:col10/1733764888705/Put/seqid=0 
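[Editor's note] The PressureAwareThroughputController lines above show compaction writes being accounted against a 50 MB/s budget, with sleeps injected only when a writer gets ahead of it (here it never does, hence "slept 0 time(s)"). The sketch below is a much-simplified, fixed-limit throttle for illustration only; the real controller also scales the limit with memstore and compaction pressure.

// Much-simplified illustration of write throttling: track bytes written against a
// bytes-per-second budget and sleep when ahead of it. The limit is fixed at the
// 50 MB/s reported in the log; HBase's PressureAwareThroughputController adjusts
// it dynamically.
public class SimpleThroughputThrottle {
  private final double bytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten = 0;

  SimpleThroughputThrottle(double bytesPerSecond) {
    this.bytesPerSecond = bytesPerSecond;
  }

  /** Call after writing `bytes`; sleeps if we are ahead of the budget. */
  void control(long bytes) throws InterruptedException {
    bytesWritten += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesWritten / bytesPerSecond;
    if (earliestAllowedSec > elapsedSec) {
      long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
      Thread.sleep(sleepMs); // the "slept N time(s)" counter corresponds to these pauses
    }
  }

  public static void main(String[] args) throws Exception {
    SimpleThroughputThrottle throttle = new SimpleThroughputThrottle(50 * 1024 * 1024);
    for (int i = 0; i < 10; i++) {
      throttle.control(12 * 1024); // pretend we wrote one ~12 KB block per iteration
    }
  }
}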
2024-12-09T17:21:29,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741934_1110 (size=12301) 2024-12-09T17:21:29,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:29,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:29,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764949341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764949343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764949444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764949444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:29,615 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/4e166b6bb4d341b1afa4077922bfd791 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/4e166b6bb4d341b1afa4077922bfd791 2024-12-09T17:21:29,616 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/f7a18a0ee252446ca4f69916666ad9bd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f7a18a0ee252446ca4f69916666ad9bd 2024-12-09T17:21:29,620 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into 4e166b6bb4d341b1afa4077922bfd791(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:29,620 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:29,620 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=12, startTime=1733764889175; duration=0sec 2024-12-09T17:21:29,620 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:29,620 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:29,621 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:29,622 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into f7a18a0ee252446ca4f69916666ad9bd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:29,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:29,622 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=12, startTime=1733764889175; duration=0sec 2024-12-09T17:21:29,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:29,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:29,622 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:29,622 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:29,622 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:29,622 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/46637fdfaacc4140b98940fbcb9bff0e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=48.9 K 2024-12-09T17:21:29,623 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46637fdfaacc4140b98940fbcb9bff0e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1733764884703 2024-12-09T17:21:29,623 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e24116fd9a14fa7ac81d446974d90e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733764885869 2024-12-09T17:21:29,624 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting efa5a3c301434fe3b027441c35d9def1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1733764886221 2024-12-09T17:21:29,624 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting daabd65757144c24ae25b3ad7ef60163, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888369 2024-12-09T17:21:29,638 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:29,639 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/1be88bb7e5024a57b81cc8d6b174664c is 50, key is test_row_0/C:col10/1733764888369/Put/seqid=0 2024-12-09T17:21:29,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741935_1111 (size=13357) 2024-12-09T17:21:29,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764949646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764949647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,696 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/71f01b684c62499d827668fe1a7a25de 2024-12-09T17:21:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2e35abb8ffb44102b01386496c06b774 is 50, key is test_row_0/B:col10/1733764888705/Put/seqid=0 2024-12-09T17:21:29,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741936_1112 (size=12301) 2024-12-09T17:21:29,710 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2e35abb8ffb44102b01386496c06b774 2024-12-09T17:21:29,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/e8474bb755f84fada0b70ad7853bac52 is 50, key is test_row_0/C:col10/1733764888705/Put/seqid=0 2024-12-09T17:21:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741937_1113 (size=12301) 2024-12-09T17:21:29,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764949948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:29,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764949949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:30,050 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/1be88bb7e5024a57b81cc8d6b174664c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/1be88bb7e5024a57b81cc8d6b174664c 2024-12-09T17:21:30,055 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 1be88bb7e5024a57b81cc8d6b174664c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:30,056 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:30,056 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=12, startTime=1733764889175; duration=0sec 2024-12-09T17:21:30,056 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:30,056 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:30,123 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/e8474bb755f84fada0b70ad7853bac52 2024-12-09T17:21:30,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/71f01b684c62499d827668fe1a7a25de as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de 2024-12-09T17:21:30,133 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de, entries=150, sequenceid=441, filesize=12.0 K 2024-12-09T17:21:30,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2e35abb8ffb44102b01386496c06b774 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774 2024-12-09T17:21:30,140 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774, entries=150, sequenceid=441, filesize=12.0 K 2024-12-09T17:21:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/e8474bb755f84fada0b70ad7853bac52 
as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52 2024-12-09T17:21:30,147 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52, entries=150, sequenceid=441, filesize=12.0 K 2024-12-09T17:21:30,148 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 326764652e67b313fc217edc01a9dfcb in 915ms, sequenceid=441, compaction requested=false 2024-12-09T17:21:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:30,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-09T17:21:30,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-09T17:21:30,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-09T17:21:30,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2210 sec 2024-12-09T17:21:30,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 1.2250 sec 2024-12-09T17:21:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:30,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:30,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:30,460 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac74c9b6308f4b1c859f02fc3ca31442 is 50, key is test_row_0/A:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:30,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741938_1114 (size=14741) 2024-12-09T17:21:30,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764950472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764950473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764950575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764950575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764950777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:30,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764950778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:30,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac74c9b6308f4b1c859f02fc3ca31442 2024-12-09T17:21:30,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6a62cd98f5c2424ba8e3f0a9ada41950 is 50, key is test_row_0/B:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741939_1115 (size=12301) 2024-12-09T17:21:31,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-09T17:21:31,031 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-09T17:21:31,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:31,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-12-09T17:21:31,034 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:31,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:31,034 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:31,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-09T17:21:31,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764951079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764951081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:31,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-09T17:21:31,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:31,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:31,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:31,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:31,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:31,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:31,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6a62cd98f5c2424ba8e3f0a9ada41950 2024-12-09T17:21:31,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4c1954484a224529b699769e47e0aa11 is 50, key is test_row_0/C:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:31,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741940_1116 (size=12301) 2024-12-09T17:21:31,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4c1954484a224529b699769e47e0aa11 2024-12-09T17:21:31,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ac74c9b6308f4b1c859f02fc3ca31442 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442 2024-12-09T17:21:31,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442, entries=200, sequenceid=467, filesize=14.4 K 2024-12-09T17:21:31,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/6a62cd98f5c2424ba8e3f0a9ada41950 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950 2024-12-09T17:21:31,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950, entries=150, sequenceid=467, filesize=12.0 K 2024-12-09T17:21:31,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/4c1954484a224529b699769e47e0aa11 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11 2024-12-09T17:21:31,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11, entries=150, sequenceid=467, filesize=12.0 K 2024-12-09T17:21:31,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 326764652e67b313fc217edc01a9dfcb in 877ms, sequenceid=467, compaction requested=true 2024-12-09T17:21:31,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:31,332 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:31,333 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:31,334 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40399 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:31,334 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:31,334 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:31,334 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/4e166b6bb4d341b1afa4077922bfd791, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=39.5 K 2024-12-09T17:21:31,335 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:31,335 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e166b6bb4d341b1afa4077922bfd791, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888369 2024-12-09T17:21:31,335 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:31,335 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
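The ExploringCompactionPolicy entries above select all three eligible store files for a minor compaction of 326764652e67b313fc217edc01a9dfcb/A while reporting "16 blocking". Those thresholds come from standard HBase store-file settings rather than anything specific to this test; the following sketch, with illustrative values only, names the configuration keys involved (hbase.hstore.compactionThreshold, hbase.hstore.compaction.max, hbase.hstore.blockingStoreFiles).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: the store-file thresholds behind the "Selecting compaction from 3 store
// files, 0 compacting, 3 eligible, 16 blocking" entries above. Values are the usual defaults.
public class CompactionTuningSketch {
  public static Configuration compactionTunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction is attempted.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on the number of files folded into a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes block once a store reaches this many files; this is the "16 blocking"
    // figure reported by SortedCompactionPolicy above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}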
2024-12-09T17:21:31,335 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f7a18a0ee252446ca4f69916666ad9bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=37.1 K 2024-12-09T17:21:31,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:31,335 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f7a18a0ee252446ca4f69916666ad9bd, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888369 2024-12-09T17:21:31,335 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71f01b684c62499d827668fe1a7a25de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733764888701 2024-12-09T17:21:31,336 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e35abb8ffb44102b01386496c06b774, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733764888701 2024-12-09T17:21:31,336 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac74c9b6308f4b1c859f02fc3ca31442, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889333 2024-12-09T17:21:31,336 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a62cd98f5c2424ba8e3f0a9ada41950, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889336 2024-12-09T17:21:31,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-09T17:21:31,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:31,339 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:31,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:31,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/dcb99e2320534bfabc9dafe7c6190090 is 50, key is test_row_0/A:col10/1733764890471/Put/seqid=0 2024-12-09T17:21:31,353 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#103 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:31,353 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/cb80b0ceacda46cda07fc563e30b753c is 50, key is test_row_0/B:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:31,356 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:31,356 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7 is 50, key is test_row_0/A:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:31,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741942_1118 (size=13459) 2024-12-09T17:21:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741941_1117 (size=12301) 2024-12-09T17:21:31,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741943_1119 (size=13459) 2024-12-09T17:21:31,424 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7 2024-12-09T17:21:31,429 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into ef4bcd78bb6a440cbd4fdaf09a6a3bd7(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
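The flush entries above ("FLUSHING TO DISK ... Swapping pipeline suffix") are emitted by CompactingMemStore, i.e. the column families have in-memory compaction enabled rather than the default memstore. A minimal sketch of how a 2.x client enables that for families A, B and C follows; the BASIC policy and the descriptor construction are illustrative assumptions, not taken from this test's source.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a table descriptor with in-memory compaction (CompactingMemStore) enabled,
// which is what produces the CompactionPipeline log lines seen above.
public class CompactingMemStoreSketch {
  public static TableDescriptor acidTable() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] { "A", "B", "C" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              // BASIC keeps a pipeline of in-memory segments; on flush the pipeline
              // is swapped out and written to disk.
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    return builder.build();
  }
}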
2024-12-09T17:21:31,429 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:31,429 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764891332; duration=0sec 2024-12-09T17:21:31,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:31,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:31,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:31,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:31,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:31,432 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:31,432 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/1be88bb7e5024a57b81cc8d6b174664c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=37.1 K 2024-12-09T17:21:31,432 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1be88bb7e5024a57b81cc8d6b174664c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733764888369 2024-12-09T17:21:31,433 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8474bb755f84fada0b70ad7853bac52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733764888701 2024-12-09T17:21:31,434 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c1954484a224529b699769e47e0aa11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889336 2024-12-09T17:21:31,447 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:31,447 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/c5ad53520a6e483abda6fa2862306a6a is 50, key is test_row_0/C:col10/1733764890453/Put/seqid=0 2024-12-09T17:21:31,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741944_1120 (size=13459) 2024-12-09T17:21:31,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. as already flushing 2024-12-09T17:21:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:31,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764951603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764951605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:31,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764951706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764951707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,791 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/dcb99e2320534bfabc9dafe7c6190090 2024-12-09T17:21:31,794 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/cb80b0ceacda46cda07fc563e30b753c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/cb80b0ceacda46cda07fc563e30b753c 2024-12-09T17:21:31,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7916405b7abb4e2099531f0de2533a36 is 50, key is test_row_0/B:col10/1733764890471/Put/seqid=0 2024-12-09T17:21:31,802 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into cb80b0ceacda46cda07fc563e30b753c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
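From 17:21:31,605 onward the write handlers repeatedly reject mutations with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush and compactions above drain the region; the blocking limit is the memstore flush size times hbase.hregion.memstore.block.multiplier, and the 512 K figure indicates the test runs with a deliberately small flush size. The normal HBase client retries these failures internally (they may surface wrapped in a RetriesExhaustedException); the sketch below shows an equivalent explicit retry loop using only the public client API. RETRIES, BACKOFF_MS and the Put contents are illustrative assumptions, not values from this log.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative retry loop for the RegionTooBusyException entries above; not part of the test.
public class BusyRegionRetrySketch {
  private static final int RETRIES = 5;         // assumption, not from the log
  private static final long BACKOFF_MS = 200L;  // assumption, not from the log

  public static void putWithRetry(Connection conn, Put put)
      throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          // May fail while the region's memstore is over its blocking limit; depending on
          // client retry settings the exception can also arrive wrapped by the client.
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          if (attempt >= RETRIES) {
            throw busy;  // give up; the region is still blocked on flush/compaction
          }
          Thread.sleep(BACKOFF_MS * attempt);  // back off while the flush drains the memstore
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection()) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithRetry(conn, put);
    }
  }
}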
2024-12-09T17:21:31,802 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:31,802 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764891333; duration=0sec 2024-12-09T17:21:31,802 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:31,802 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:31,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741945_1121 (size=12301) 2024-12-09T17:21:31,859 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/c5ad53520a6e483abda6fa2862306a6a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/c5ad53520a6e483abda6fa2862306a6a 2024-12-09T17:21:31,864 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into c5ad53520a6e483abda6fa2862306a6a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:31,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:31,864 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764891333; duration=0sec 2024-12-09T17:21:31,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:31,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:31,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764951909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764951909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:32,206 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7916405b7abb4e2099531f0de2533a36 2024-12-09T17:21:32,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/0514fd8e36f84466b703695e01f7533a is 50, key is test_row_0/C:col10/1733764890471/Put/seqid=0 2024-12-09T17:21:32,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764952213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764952213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741946_1122 (size=12301) 2024-12-09T17:21:32,618 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/0514fd8e36f84466b703695e01f7533a 2024-12-09T17:21:32,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/dcb99e2320534bfabc9dafe7c6190090 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090 2024-12-09T17:21:32,627 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090, entries=150, sequenceid=480, filesize=12.0 K 2024-12-09T17:21:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/7916405b7abb4e2099531f0de2533a36 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36 2024-12-09T17:21:32,632 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36, entries=150, sequenceid=480, filesize=12.0 K 2024-12-09T17:21:32,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/0514fd8e36f84466b703695e01f7533a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a 2024-12-09T17:21:32,639 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a, entries=150, sequenceid=480, filesize=12.0 K 2024-12-09T17:21:32,639 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 326764652e67b313fc217edc01a9dfcb in 1300ms, sequenceid=480, compaction requested=false 2024-12-09T17:21:32,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:32,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:32,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-12-09T17:21:32,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-12-09T17:21:32,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-09T17:21:32,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6070 sec 2024-12-09T17:21:32,643 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.6100 sec 2024-12-09T17:21:32,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:32,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:32,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:32,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3495cac5312942f8af2cad89736e0bf3 is 50, key is test_row_0/A:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:32,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764952726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 317 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764952726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741947_1123 (size=14741) 2024-12-09T17:21:32,799 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3ba01639 to 127.0.0.1:54326 2024-12-09T17:21:32,799 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:32,799 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4bf8e82a to 127.0.0.1:54326 2024-12-09T17:21:32,799 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:32,800 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24890c79 to 127.0.0.1:54326 2024-12-09T17:21:32,801 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51cab508 to 127.0.0.1:54326 2024-12-09T17:21:32,801 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:32,801 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764952829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:32,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 319 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764952829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 321 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764953032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764953032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3495cac5312942f8af2cad89736e0bf3 2024-12-09T17:21:33,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-09T17:21:33,138 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-09T17:21:33,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5c28e323c69146f0a439f81d122d9f90 is 50, key is test_row_0/B:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:33,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741948_1124 (size=12301) 2024-12-09T17:21:33,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764953335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764953336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5c28e323c69146f0a439f81d122d9f90 2024-12-09T17:21:33,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/8791ddb19a96473bb0c83cf4e77ce834 is 50, key is test_row_0/C:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:33,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741949_1125 (size=12301) 2024-12-09T17:21:33,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 310 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36772 deadline: 1733764953840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36808 deadline: 1733764953840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:33,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
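The records above show the region server applying write backpressure: while the memstore of region 326764652e67b313fc217edc01a9dfcb is over its blocking limit (512.0 K in this test run), incoming Mutate RPCs are rejected with RegionTooBusyException until the in-flight flush drains the memstore. Below is a minimal client-side sketch of coping with that exception; the table, row, column family, and backoff values are hypothetical, and in practice the HBase client already retries this exception internally, so explicit handling like this is optional.

// Illustrative only: a client write loop that backs off on RegionTooBusyException.
// Table/row/family names are hypothetical; the HBase client also retries this
// exception on its own, so this explicit loop is just a sketch of the idea.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);          // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give the flush time to drain the memstore, then retry
                    backoffMs *= 2;
                }
            }
        }
    }
}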
2024-12-09T17:21:33,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/8791ddb19a96473bb0c83cf4e77ce834 2024-12-09T17:21:33,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/3495cac5312942f8af2cad89736e0bf3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3 2024-12-09T17:21:33,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3, entries=200, sequenceid=508, filesize=14.4 K 2024-12-09T17:21:33,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/5c28e323c69146f0a439f81d122d9f90 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90 2024-12-09T17:21:33,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90, entries=150, sequenceid=508, filesize=12.0 K 2024-12-09T17:21:33,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/8791ddb19a96473bb0c83cf4e77ce834 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834 2024-12-09T17:21:33,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834, entries=150, sequenceid=508, filesize=12.0 K 2024-12-09T17:21:33,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 326764652e67b313fc217edc01a9dfcb in 1270ms, sequenceid=508, compaction requested=true 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:33,988 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:33,988 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 326764652e67b313fc217edc01a9dfcb:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:33,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:33,989 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:33,989 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:33,989 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/B is initiating minor compaction (all files) 2024-12-09T17:21:33,989 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/A is initiating minor compaction (all files) 2024-12-09T17:21:33,989 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/B in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:33,989 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/A in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:33,990 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/cb80b0ceacda46cda07fc563e30b753c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=37.2 K 2024-12-09T17:21:33,990 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=39.6 K 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting cb80b0ceacda46cda07fc563e30b753c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889336 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef4bcd78bb6a440cbd4fdaf09a6a3bd7, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889336 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcb99e2320534bfabc9dafe7c6190090, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733764890467 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7916405b7abb4e2099531f0de2533a36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733764890467 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3495cac5312942f8af2cad89736e0bf3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1733764891601 2024-12-09T17:21:33,990 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c28e323c69146f0a439f81d122d9f90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1733764891604 2024-12-09T17:21:33,997 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#A#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:33,997 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#B#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:33,997 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/1e90f2bdebaf4154a9f04a60a0aa5d1b is 50, key is test_row_0/A:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:33,997 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/9cf3d82849c947c09c03deaf5e5550cb is 50, key is test_row_0/B:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:34,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741951_1127 (size=13561) 2024-12-09T17:21:34,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741950_1126 (size=13561) 2024-12-09T17:21:34,407 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/9cf3d82849c947c09c03deaf5e5550cb as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/9cf3d82849c947c09c03deaf5e5550cb 2024-12-09T17:21:34,407 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/1e90f2bdebaf4154a9f04a60a0aa5d1b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/1e90f2bdebaf4154a9f04a60a0aa5d1b 2024-12-09T17:21:34,412 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/A of 326764652e67b313fc217edc01a9dfcb into 1e90f2bdebaf4154a9f04a60a0aa5d1b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:34,412 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/B of 326764652e67b313fc217edc01a9dfcb into 9cf3d82849c947c09c03deaf5e5550cb(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
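For context on the flush-then-compact cycle recorded here: each memstore flush adds one new HFile per store, and once enough files are eligible (three in this run) the size-based policy selects them for a minor compaction, as seen above for stores A and B. The hedged sketch below shows how the same flush and compaction could be requested explicitly through the Admin API; in this log they are triggered automatically by MemStoreFlusher and CompactSplit, so the manual calls are purely illustrative.

// A sketch of requesting the same flush/compaction by hand via the Admin API.
// The cluster configuration and table name are taken from this test run; in the
// log itself these steps happen automatically on the region server.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);   // persist current memstore contents to new HFiles
            admin.compact(table); // ask for a (minor) compaction of the store files
        }
    }
}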
2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:34,412 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/B, priority=13, startTime=1733764893988; duration=0sec 2024-12-09T17:21:34,412 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/A, priority=13, startTime=1733764893988; duration=0sec 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:B 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:A 2024-12-09T17:21:34,412 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:34,413 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:34,413 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 326764652e67b313fc217edc01a9dfcb/C is initiating minor compaction (all files) 2024-12-09T17:21:34,413 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 326764652e67b313fc217edc01a9dfcb/C in TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
2024-12-09T17:21:34,414 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/c5ad53520a6e483abda6fa2862306a6a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp, totalSize=37.2 K 2024-12-09T17:21:34,414 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c5ad53520a6e483abda6fa2862306a6a, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733764889336 2024-12-09T17:21:34,414 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0514fd8e36f84466b703695e01f7533a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=480, earliestPutTs=1733764890467 2024-12-09T17:21:34,415 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8791ddb19a96473bb0c83cf4e77ce834, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1733764891604 2024-12-09T17:21:34,422 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 326764652e67b313fc217edc01a9dfcb#C#compaction#113 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:34,422 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/390789afe3d04f859fcffb49f8949148 is 50, key is test_row_0/C:col10/1733764892718/Put/seqid=0 2024-12-09T17:21:34,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741952_1128 (size=13561) 2024-12-09T17:21:34,832 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/390789afe3d04f859fcffb49f8949148 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/390789afe3d04f859fcffb49f8949148 2024-12-09T17:21:34,837 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 326764652e67b313fc217edc01a9dfcb/C of 326764652e67b313fc217edc01a9dfcb into 390789afe3d04f859fcffb49f8949148(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:34,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:34,837 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb., storeName=326764652e67b313fc217edc01a9dfcb/C, priority=13, startTime=1733764893988; duration=0sec 2024-12-09T17:21:34,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:34,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 326764652e67b313fc217edc01a9dfcb:C 2024-12-09T17:21:34,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:34,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:21:34,859 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x251efa5e to 127.0.0.1:54326 2024-12-09T17:21:34,859 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x774bf929 to 127.0.0.1:54326 2024-12-09T17:21:34,859 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:34,859 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:34,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:34,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:34,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:34,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:34,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:34,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:34,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b4c0887c349c45afafabe9998b27cc32 is 50, key is test_row_0/A:col10/1733764892723/Put/seqid=0 2024-12-09T17:21:34,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741953_1129 (size=12301) 2024-12-09T17:21:35,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b4c0887c349c45afafabe9998b27cc32 2024-12-09T17:21:35,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/618ba167c7d04d4084de6f169e627e07 is 50, key is test_row_0/B:col10/1733764892723/Put/seqid=0 2024-12-09T17:21:35,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741954_1130 (size=12301) 2024-12-09T17:21:35,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/618ba167c7d04d4084de6f169e627e07 2024-12-09T17:21:35,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/9adcda5e66794cdeacff4cb473555374 is 50, key is test_row_0/C:col10/1733764892723/Put/seqid=0 2024-12-09T17:21:35,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741955_1131 (size=12301) 2024-12-09T17:21:35,948 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67f7d3d3 to 127.0.0.1:54326 2024-12-09T17:21:35,948 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:36,003 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54af89df to 127.0.0.1:54326 2024-12-09T17:21:36,003 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:36,011 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x28808bb9 to 127.0.0.1:54326 2024-12-09T17:21:36,011 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:36,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-09T17:21:36,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 169 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 154 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7527 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7394 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3206 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9616 rows 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3196 2024-12-09T17:21:36,012 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9586 rows 2024-12-09T17:21:36,012 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T17:21:36,012 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x038196d7 to 127.0.0.1:54326 2024-12-09T17:21:36,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:21:36,015 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-09T17:21:36,019 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-09T17:21:36,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:36,026 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764896025"}]},"ts":"1733764896025"} 2024-12-09T17:21:36,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:36,027 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-09T17:21:36,033 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-09T17:21:36,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:21:36,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, UNASSIGN}] 2024-12-09T17:21:36,040 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, UNASSIGN 2024-12-09T17:21:36,041 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=326764652e67b313fc217edc01a9dfcb, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:36,042 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:21:36,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:36,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/9adcda5e66794cdeacff4cb473555374 2024-12-09T17:21:36,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/b4c0887c349c45afafabe9998b27cc32 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b4c0887c349c45afafabe9998b27cc32 2024-12-09T17:21:36,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b4c0887c349c45afafabe9998b27cc32, entries=150, sequenceid=524, filesize=12.0 K 2024-12-09T17:21:36,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/618ba167c7d04d4084de6f169e627e07 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/618ba167c7d04d4084de6f169e627e07 2024-12-09T17:21:36,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/618ba167c7d04d4084de6f169e627e07, entries=150, sequenceid=524, filesize=12.0 K 2024-12-09T17:21:36,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/9adcda5e66794cdeacff4cb473555374 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/9adcda5e66794cdeacff4cb473555374 2024-12-09T17:21:36,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/9adcda5e66794cdeacff4cb473555374, entries=150, sequenceid=524, filesize=12.0 K 2024-12-09T17:21:36,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=20.13 KB/20610 for 326764652e67b313fc217edc01a9dfcb in 1255ms, sequenceid=524, compaction requested=false 2024-12-09T17:21:36,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:36,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:36,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:36,198 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 326764652e67b313fc217edc01a9dfcb, disabling compactions & flushes 2024-12-09T17:21:36,199 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. after waiting 0 ms 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 
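The pid=36..39 entries trace the disable-table procedure chain (DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure, CloseRegionProcedure); the close handler then flushes the region's remaining memstore data before the region is closed, as the entries that follow show. A minimal sketch of the client call that starts this chain is below; Admin.disableTable blocks until the procedure finishes, so the polling loop is shown only for illustration.

// Minimal sketch of the client side of the disable seen above.
// disableTable is synchronous; the isTableDisabled poll is only illustrative.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.disableTable(table);              // master schedules the DisableTableProcedure
            while (!admin.isTableDisabled(table)) { // regions are closed (with a final flush) first
                Thread.sleep(500);
            }
        }
    }
}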
2024-12-09T17:21:36,199 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 326764652e67b313fc217edc01a9dfcb 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-09T17:21:36,199 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=A 2024-12-09T17:21:36,200 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:36,200 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=B 2024-12-09T17:21:36,200 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:36,200 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 326764652e67b313fc217edc01a9dfcb, store=C 2024-12-09T17:21:36,200 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:36,203 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/43570f2e71aa42d6bbf5e7a53b027c7d is 50, key is test_row_0/A:col10/1733764896010/Put/seqid=0 2024-12-09T17:21:36,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741956_1132 (size=9857) 2024-12-09T17:21:36,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:36,608 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/43570f2e71aa42d6bbf5e7a53b027c7d 2024-12-09T17:21:36,614 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2a6a2a29d765448c8d6a087abeddded5 is 50, key is test_row_0/B:col10/1733764896010/Put/seqid=0 2024-12-09T17:21:36,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741957_1133 (size=9857) 2024-12-09T17:21:36,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:37,019 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 
{event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2a6a2a29d765448c8d6a087abeddded5 2024-12-09T17:21:37,028 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/67f2c811b72e4c9d854a4a1dbe2157df is 50, key is test_row_0/C:col10/1733764896010/Put/seqid=0 2024-12-09T17:21:37,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741958_1134 (size=9857) 2024-12-09T17:21:37,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:37,432 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/67f2c811b72e4c9d854a4a1dbe2157df 2024-12-09T17:21:37,437 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/A/43570f2e71aa42d6bbf5e7a53b027c7d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/43570f2e71aa42d6bbf5e7a53b027c7d 2024-12-09T17:21:37,441 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/43570f2e71aa42d6bbf5e7a53b027c7d, entries=100, sequenceid=530, filesize=9.6 K 2024-12-09T17:21:37,442 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/B/2a6a2a29d765448c8d6a087abeddded5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2a6a2a29d765448c8d6a087abeddded5 2024-12-09T17:21:37,447 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2a6a2a29d765448c8d6a087abeddded5, entries=100, sequenceid=530, filesize=9.6 K 2024-12-09T17:21:37,448 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/.tmp/C/67f2c811b72e4c9d854a4a1dbe2157df as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/67f2c811b72e4c9d854a4a1dbe2157df 2024-12-09T17:21:37,453 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/67f2c811b72e4c9d854a4a1dbe2157df, entries=100, sequenceid=530, filesize=9.6 K 2024-12-09T17:21:37,453 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 326764652e67b313fc217edc01a9dfcb in 1254ms, sequenceid=530, compaction requested=true 2024-12-09T17:21:37,454 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e96bf0fa5736429f992a9a1be7685c19, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/f67a84b8ce0b4f0a9630bd4a7e572023, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d0b4ee066873457794ee2810af08e553, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/babbcb91ab024608b14c4b740712e24c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/42f7051588124fdabeb913398e17fb08, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5922eb62805e4267b2195d525ccac526, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac22e22322b44bd9bf8f67c95ee0f053, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b877fd552ec747dfbd3ab153b9cd3c90, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/4e166b6bb4d341b1afa4077922bfd791, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3] to archive 2024-12-09T17:21:37,457 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:21:37,462 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/36d940e2a3a34dd7b13dea64b335fb0e 2024-12-09T17:21:37,464 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8fc7f4549fde40c298aa4a10d05e6203 2024-12-09T17:21:37,465 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e96bf0fa5736429f992a9a1be7685c19 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e96bf0fa5736429f992a9a1be7685c19 2024-12-09T17:21:37,467 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/28af4dcccb54490d828508ac38a991d6 2024-12-09T17:21:37,468 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/c5c4dba065f547cc83000823df56b469 2024-12-09T17:21:37,470 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/e510462aa0104cf09b6100ef85c91341 2024-12-09T17:21:37,471 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/f67a84b8ce0b4f0a9630bd4a7e572023 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/f67a84b8ce0b4f0a9630bd4a7e572023 2024-12-09T17:21:37,473 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/52a6f2d5a0e24ee0ab7c7294e7cb0643 2024-12-09T17:21:37,474 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/60fdf6be160b42069c1ce2d04609f481 2024-12-09T17:21:37,475 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5990d6fa0dc8491fbfebb457b678e3ef 2024-12-09T17:21:37,476 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2f293c21ebef47c5a1003d5c02699b3c 2024-12-09T17:21:37,478 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d0b4ee066873457794ee2810af08e553 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d0b4ee066873457794ee2810af08e553 2024-12-09T17:21:37,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac2677149b6a4e6dac6c3c85a5d951cc 2024-12-09T17:21:37,480 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/babbcb91ab024608b14c4b740712e24c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/babbcb91ab024608b14c4b740712e24c 2024-12-09T17:21:37,481 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/2424ed53ab8043b887c27bf8447f6c46 2024-12-09T17:21:37,482 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/a5b828d58f234303ac6b46f8bd365f53 2024-12-09T17:21:37,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/999e9ccdbf49454c85e19c5ce48bcf41 2024-12-09T17:21:37,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/42f7051588124fdabeb913398e17fb08 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/42f7051588124fdabeb913398e17fb08 2024-12-09T17:21:37,486 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/0ea9a79d1b2b4f828b2c3948e6583c28 2024-12-09T17:21:37,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/737504b789e5431e9f55fd48ea5866fa 2024-12-09T17:21:37,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5922eb62805e4267b2195d525ccac526 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/5922eb62805e4267b2195d525ccac526 2024-12-09T17:21:37,489 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3dd535cd640041fe9e70b5fcf303ce84 2024-12-09T17:21:37,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b3d85fa0083242d79405052fe5fc14bc 2024-12-09T17:21:37,491 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac22e22322b44bd9bf8f67c95ee0f053 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac22e22322b44bd9bf8f67c95ee0f053 2024-12-09T17:21:37,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ba217eb877ab4cc68ccb90616644bd9f 2024-12-09T17:21:37,493 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/857979f34c1f4e4bac46b1c69c0919fe 2024-12-09T17:21:37,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/8a3745e2d4b34899b955f04be1ec2a69 2024-12-09T17:21:37,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b877fd552ec747dfbd3ab153b9cd3c90 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b877fd552ec747dfbd3ab153b9cd3c90 2024-12-09T17:21:37,496 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/09a1c70f14b44c4183e0c55b4fc62556 2024-12-09T17:21:37,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/d1cae0485945448f97359991100173f6 2024-12-09T17:21:37,498 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/106a615ac32c48f68ef3cd4fde9270ae 2024-12-09T17:21:37,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/4e166b6bb4d341b1afa4077922bfd791 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/4e166b6bb4d341b1afa4077922bfd791 2024-12-09T17:21:37,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/71f01b684c62499d827668fe1a7a25de 2024-12-09T17:21:37,501 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ac74c9b6308f4b1c859f02fc3ca31442 2024-12-09T17:21:37,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/ef4bcd78bb6a440cbd4fdaf09a6a3bd7 2024-12-09T17:21:37,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/dcb99e2320534bfabc9dafe7c6190090 2024-12-09T17:21:37,504 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/3495cac5312942f8af2cad89736e0bf3 2024-12-09T17:21:37,518 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/d4c3169fc0064982aaf0cac49ad58369, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/eb9a1003d3194cd8a97e8cb91ba10ff2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/71813fb9209a4706b46cee2c6d29a8a8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/100bd26c7ec9451db4589394c615521e, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/496d600e0f724f42a8325e2703f87bbe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6b410e61dc9d400d8e3356768b89fd02, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6e38bb7d6cac4fa6a024f7ff378e330d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/77e8aea254cf43c2be96ec2dde0320d0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f7a18a0ee252446ca4f69916666ad9bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/cb80b0ceacda46cda07fc563e30b753c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90] to archive 2024-12-09T17:21:37,519 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:21:37,521 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/debc8c9ade334d319d34438bcbfaaf4a 2024-12-09T17:21:37,523 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c661f75b2cd64b4aa7aa1ac6fb28324f 2024-12-09T17:21:37,524 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/d4c3169fc0064982aaf0cac49ad58369 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/d4c3169fc0064982aaf0cac49ad58369 2024-12-09T17:21:37,525 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/aa12b89bbd6644f18357bfa8f89f4dc1 2024-12-09T17:21:37,526 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e26b76c812f349d2a9ada11001d0d4d4 2024-12-09T17:21:37,527 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/4f7224c5ba914322aba0261243d5c966 2024-12-09T17:21:37,529 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/eb9a1003d3194cd8a97e8cb91ba10ff2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/eb9a1003d3194cd8a97e8cb91ba10ff2 2024-12-09T17:21:37,530 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ef7b2492e2dc49b6aa52328c5b7981bd 2024-12-09T17:21:37,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e5266ca72d464d39b12e1434dc67f924 2024-12-09T17:21:37,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/13379488dd7a49668c31cbeb09aab1cd 2024-12-09T17:21:37,533 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/71813fb9209a4706b46cee2c6d29a8a8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/71813fb9209a4706b46cee2c6d29a8a8 2024-12-09T17:21:37,535 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/98e0e95fbd8c44378e47a09e8c90a814 2024-12-09T17:21:37,537 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f06160a2a93a4f4aa1893be963013d40 2024-12-09T17:21:37,538 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/100bd26c7ec9451db4589394c615521e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/100bd26c7ec9451db4589394c615521e 2024-12-09T17:21:37,539 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e996e4f753714b74b81760ac81dfa938 2024-12-09T17:21:37,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/ae22810cd670408dab3b99625cbd28a8 2024-12-09T17:21:37,541 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/3dfd6ae83c254e54ab3d7abeb9bed9b3 2024-12-09T17:21:37,542 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/496d600e0f724f42a8325e2703f87bbe to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/496d600e0f724f42a8325e2703f87bbe 2024-12-09T17:21:37,543 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/a3c851b3906b4224a8368186ed952b12 2024-12-09T17:21:37,544 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2cb14648c49046168191683e6ee1c686 2024-12-09T17:21:37,545 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6b410e61dc9d400d8e3356768b89fd02 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6b410e61dc9d400d8e3356768b89fd02 2024-12-09T17:21:37,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/c1e1e7dac33f42519870a0e6b5c45417 2024-12-09T17:21:37,547 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5d65c1a8aefe4417a8744151115e13f1 2024-12-09T17:21:37,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6e38bb7d6cac4fa6a024f7ff378e330d to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6e38bb7d6cac4fa6a024f7ff378e330d 2024-12-09T17:21:37,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0defed453edd4d5093e5a554cff132db 2024-12-09T17:21:37,550 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/62a04eba61f1415cb2bdb2efbe5b25a9 2024-12-09T17:21:37,551 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/77e8aea254cf43c2be96ec2dde0320d0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/77e8aea254cf43c2be96ec2dde0320d0 2024-12-09T17:21:37,552 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/e75e65dfbf1445e6a147cc1a5fe9f215 2024-12-09T17:21:37,553 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/0c6f025fad4847f79b4c6a6b175bd626 2024-12-09T17:21:37,554 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/bf8734d6846d4039a83fd61e437a6cb5 2024-12-09T17:21:37,555 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f7a18a0ee252446ca4f69916666ad9bd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/f7a18a0ee252446ca4f69916666ad9bd 2024-12-09T17:21:37,556 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7754add93b4d4378b67a7909fcd8fa63 2024-12-09T17:21:37,557 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2e35abb8ffb44102b01386496c06b774 2024-12-09T17:21:37,558 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/cb80b0ceacda46cda07fc563e30b753c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/cb80b0ceacda46cda07fc563e30b753c 2024-12-09T17:21:37,559 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/6a62cd98f5c2424ba8e3f0a9ada41950 2024-12-09T17:21:37,559 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/7916405b7abb4e2099531f0de2533a36 2024-12-09T17:21:37,560 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/5c28e323c69146f0a439f81d122d9f90 2024-12-09T17:21:37,561 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ba8cb8f160224447a4631c6642b87384, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8882a91221304dd3b064646cc45d78f0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/dbf90c7e784841e2bc8d544808b0b927, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0c304ca6e44441d49bf532ae94320482, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/81eda911d1ed4cb382dd5989d687a705, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/cb24957be7fc4ba0b8fb6868203c94b1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/97111753fa254298a9256fdf155e71bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/46637fdfaacc4140b98940fbcb9bff0e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/1be88bb7e5024a57b81cc8d6b174664c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/c5ad53520a6e483abda6fa2862306a6a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834] to archive 2024-12-09T17:21:37,562 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:21:37,564 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4f776badca464780873fb439252af78f 2024-12-09T17:21:37,565 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/06d73677d2c74626ba348a1ac68a762c 2024-12-09T17:21:37,566 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ba8cb8f160224447a4631c6642b87384 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ba8cb8f160224447a4631c6642b87384 2024-12-09T17:21:37,567 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/60c86e9ed0fb4c62a0d1aee538f742de 2024-12-09T17:21:37,568 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/253e2c9adf4843c6911d2695264740ef 2024-12-09T17:21:37,569 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/be02eed04a68446f9526ad5401f152cc 2024-12-09T17:21:37,570 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8882a91221304dd3b064646cc45d78f0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8882a91221304dd3b064646cc45d78f0 2024-12-09T17:21:37,571 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/7c757cae816a4d808c76438df8888863 2024-12-09T17:21:37,572 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/d126935431034bf5a2d4177cf878ceef 2024-12-09T17:21:37,573 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/823b7307e8b947a0832c35088d38933f 2024-12-09T17:21:37,574 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/dbf90c7e784841e2bc8d544808b0b927 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/dbf90c7e784841e2bc8d544808b0b927 2024-12-09T17:21:37,575 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/704caaeb334c4b11af33cf66cb7ff4d5 2024-12-09T17:21:37,577 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4dfa444a673e4d0ab0043405e3088edc 2024-12-09T17:21:37,578 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0c304ca6e44441d49bf532ae94320482 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0c304ca6e44441d49bf532ae94320482 2024-12-09T17:21:37,579 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/36489855f21240248937199d6d19ed3c 2024-12-09T17:21:37,580 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6b5377a4d72e4845a6f9ccf009e0bb35 2024-12-09T17:21:37,581 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/ff9806278edb4e4ead962f90d1b1018b 2024-12-09T17:21:37,582 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/81eda911d1ed4cb382dd5989d687a705 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/81eda911d1ed4cb382dd5989d687a705 2024-12-09T17:21:37,584 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f760ea9581b94dfeaaf78f80bd0ea9f1 2024-12-09T17:21:37,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b46ba932499041dd85baaa5a658b3065 2024-12-09T17:21:37,586 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/cb24957be7fc4ba0b8fb6868203c94b1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/cb24957be7fc4ba0b8fb6868203c94b1 2024-12-09T17:21:37,587 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/b3b1d258f2c340a58c8b14416c2c0bc7 2024-12-09T17:21:37,588 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f10305c95f594ec6ace64e9174297cd6 2024-12-09T17:21:37,589 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/97111753fa254298a9256fdf155e71bc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/97111753fa254298a9256fdf155e71bc 2024-12-09T17:21:37,591 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/f868d1b74c614e12898675ca311591a5 2024-12-09T17:21:37,591 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/6e2f37839cde417ebef695cce01ba2ce 2024-12-09T17:21:37,592 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/46637fdfaacc4140b98940fbcb9bff0e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/46637fdfaacc4140b98940fbcb9bff0e 2024-12-09T17:21:37,593 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/aa44a4192be44e0a9f24f65c809a4801 2024-12-09T17:21:37,594 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/5e24116fd9a14fa7ac81d446974d90e5 2024-12-09T17:21:37,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/efa5a3c301434fe3b027441c35d9def1 2024-12-09T17:21:37,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/1be88bb7e5024a57b81cc8d6b174664c to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/1be88bb7e5024a57b81cc8d6b174664c 2024-12-09T17:21:37,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/daabd65757144c24ae25b3ad7ef60163 2024-12-09T17:21:37,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/e8474bb755f84fada0b70ad7853bac52 2024-12-09T17:21:37,599 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/c5ad53520a6e483abda6fa2862306a6a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/c5ad53520a6e483abda6fa2862306a6a 2024-12-09T17:21:37,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/4c1954484a224529b699769e47e0aa11 2024-12-09T17:21:37,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/0514fd8e36f84466b703695e01f7533a 2024-12-09T17:21:37,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/8791ddb19a96473bb0c83cf4e77ce834 2024-12-09T17:21:37,608 DEBUG 
[RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/recovered.edits/533.seqid, newMaxSeqId=533, maxSeqId=1 2024-12-09T17:21:37,610 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb. 2024-12-09T17:21:37,610 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 326764652e67b313fc217edc01a9dfcb: 2024-12-09T17:21:37,612 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:37,613 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=326764652e67b313fc217edc01a9dfcb, regionState=CLOSED 2024-12-09T17:21:37,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-09T17:21:37,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 326764652e67b313fc217edc01a9dfcb, server=80c69eb3c456,42927,1733764865379 in 1.5720 sec 2024-12-09T17:21:37,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-09T17:21:37,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=326764652e67b313fc217edc01a9dfcb, UNASSIGN in 1.5760 sec 2024-12-09T17:21:37,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-09T17:21:37,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5820 sec 2024-12-09T17:21:37,620 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764897619"}]},"ts":"1733764897619"} 2024-12-09T17:21:37,621 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:21:37,667 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:21:37,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6460 sec 2024-12-09T17:21:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-09T17:21:38,131 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-09T17:21:38,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:21:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,138 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,139 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-09T17:21:38,142 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:38,146 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/recovered.edits] 2024-12-09T17:21:38,150 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/1e90f2bdebaf4154a9f04a60a0aa5d1b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/1e90f2bdebaf4154a9f04a60a0aa5d1b 2024-12-09T17:21:38,151 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/43570f2e71aa42d6bbf5e7a53b027c7d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/43570f2e71aa42d6bbf5e7a53b027c7d 2024-12-09T17:21:38,152 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b4c0887c349c45afafabe9998b27cc32 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/A/b4c0887c349c45afafabe9998b27cc32 2024-12-09T17:21:38,154 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2a6a2a29d765448c8d6a087abeddded5 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/2a6a2a29d765448c8d6a087abeddded5 2024-12-09T17:21:38,156 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/618ba167c7d04d4084de6f169e627e07 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/618ba167c7d04d4084de6f169e627e07 2024-12-09T17:21:38,157 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/9cf3d82849c947c09c03deaf5e5550cb to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/B/9cf3d82849c947c09c03deaf5e5550cb 2024-12-09T17:21:38,159 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/390789afe3d04f859fcffb49f8949148 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/390789afe3d04f859fcffb49f8949148 2024-12-09T17:21:38,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/67f2c811b72e4c9d854a4a1dbe2157df to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/67f2c811b72e4c9d854a4a1dbe2157df 2024-12-09T17:21:38,161 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/9adcda5e66794cdeacff4cb473555374 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/C/9adcda5e66794cdeacff4cb473555374 2024-12-09T17:21:38,164 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/recovered.edits/533.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb/recovered.edits/533.seqid 2024-12-09T17:21:38,165 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/326764652e67b313fc217edc01a9dfcb 2024-12-09T17:21:38,165 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:21:38,170 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-09T17:21:38,176 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:21:38,202 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:21:38,203 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,203 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-09T17:21:38,203 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733764898203"}]},"ts":"9223372036854775807"} 2024-12-09T17:21:38,206 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:21:38,206 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 326764652e67b313fc217edc01a9dfcb, NAME => 'TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:21:38,206 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-09T17:21:38,206 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733764898206"}]},"ts":"9223372036854775807"} 2024-12-09T17:21:38,209 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:21:38,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-09T17:21:38,251 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,252 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 117 msec 2024-12-09T17:21:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-09T17:21:38,443 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-12-09T17:21:38,455 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1178314848_22 at /127.0.0.1:35378 [Waiting for operation #386] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;80c69eb3c456:42927-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_88836206_22 at /127.0.0.1:43700 [Waiting for operation #364] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=308 (was 185) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4277 (was 4795) 2024-12-09T17:21:38,466 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=308, ProcessCount=11, AvailableMemoryMB=4277 2024-12-09T17:21:38,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-09T17:21:38,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:21:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:38,470 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:21:38,470 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:38,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-12-09T17:21:38,471 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:21:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-09T17:21:38,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741959_1135 (size=963) 2024-12-09T17:21:38,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=41 2024-12-09T17:21:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-09T17:21:38,889 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:21:38,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741960_1136 (size=53) 2024-12-09T17:21:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-09T17:21:39,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:39,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 20e312d1737c5c0e923e8e7c9efe02a2, disabling compactions & flushes 2024-12-09T17:21:39,299 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:39,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:39,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. after waiting 0 ms 2024-12-09T17:21:39,300 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:39,300 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
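The records above show the master creating 'TestAcidGuarantees' (pid=41) with three column families A, B and C, one version each, the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', and a deliberately small MEMSTORE_FLUSHSIZE (131072) that triggers the TableDescriptorChecker warning. Below is a minimal sketch, using the public HBase 2.x Admin API, of how an equivalent descriptor could be built; the connection setup is an assumption and not part of the test harness, and setting ADAPTIVE per column family only roughly corresponds to the table-level metadata attribute seen in the log.

    // Sketch only: builds a table similar to the one created above
    // (families A/B/C, one version, ADAPTIVE in-memory compaction, tiny flush size).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // matches the 131072-byte flush size that the log warns about
                  .setMemStoreFlushSize(131072L);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)
                    // per-family stand-in for 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'
                    .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }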
2024-12-09T17:21:39,300 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:39,301 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:21:39,301 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733764899301"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764899301"}]},"ts":"1733764899301"} 2024-12-09T17:21:39,303 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:21:39,305 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:21:39,305 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764899305"}]},"ts":"1733764899305"} 2024-12-09T17:21:39,307 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:21:39,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, ASSIGN}] 2024-12-09T17:21:39,326 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, ASSIGN 2024-12-09T17:21:39,327 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:21:39,477 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:39,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:39,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-09T17:21:39,634 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:39,638 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:39,638 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:39,639 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,639 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:39,639 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,639 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,641 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,643 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:39,644 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName A 2024-12-09T17:21:39,644 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:39,645 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:39,645 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,647 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:39,648 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName B 2024-12-09T17:21:39,648 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:39,649 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:39,649 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,650 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:39,650 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName C 2024-12-09T17:21:39,650 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:39,651 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:39,651 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:39,652 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,652 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,654 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:21:39,655 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:39,657 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:21:39,658 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 20e312d1737c5c0e923e8e7c9efe02a2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63172033, jitterRate=-0.05866335332393646}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:21:39,659 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:39,659 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., pid=43, masterSystemTime=1733764899634 2024-12-09T17:21:39,661 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:39,661 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:39,661 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:39,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-09T17:21:39,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 in 182 msec 2024-12-09T17:21:39,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-09T17:21:39,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, ASSIGN in 339 msec 2024-12-09T17:21:39,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:21:39,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764899667"}]},"ts":"1733764899667"} 2024-12-09T17:21:39,668 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:21:39,676 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:21:39,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2080 sec 2024-12-09T17:21:40,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-09T17:21:40,578 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-09T17:21:40,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a8da76b to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f3c14c0 2024-12-09T17:21:40,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3242ee55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:40,595 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:40,598 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:40,600 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:21:40,603 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40350, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:21:40,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-09T17:21:40,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:21:40,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-09T17:21:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741961_1137 (size=999) 2024-12-09T17:21:41,033 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-09T17:21:41,033 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-09T17:21:41,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:21:41,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, REOPEN/MOVE}] 2024-12-09T17:21:41,049 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, REOPEN/MOVE 2024-12-09T17:21:41,050 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,050 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:21:41,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:41,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,204 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,204 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:21:41,204 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 20e312d1737c5c0e923e8e7c9efe02a2, disabling compactions & flushes 2024-12-09T17:21:41,204 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,204 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,204 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. after waiting 0 ms 2024-12-09T17:21:41,204 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
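The ModifyTableProcedure above (pid=44) switches family 'A' to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and reopens the region via REOPEN/MOVE. A short sketch of how the same change could be issued through the Admin API follows; the helper name is hypothetical and error handling is omitted.

    // Sketch only: mirrors the modify-table request logged above by enabling MOB
    // on family 'A' with a 4-byte threshold.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobSketch {
      // Hypothetical helper: not part of the test code.
      static void enableMobOnFamilyA(Admin admin) throws IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
            .build();
        // modifyTable drives the ModifyTableProcedure / region reopen seen in the log.
        admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build());
      }
    }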
2024-12-09T17:21:41,212 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T17:21:41,213 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,214 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:41,214 WARN [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 20e312d1737c5c0e923e8e7c9efe02a2 to self. 2024-12-09T17:21:41,216 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=CLOSED 2024-12-09T17:21:41,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-09T17:21:41,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 in 167 msec 2024-12-09T17:21:41,222 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, REOPEN/MOVE; state=CLOSED, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=true 2024-12-09T17:21:41,372 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:21:41,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,531 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:41,531 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:21:41,531 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,531 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:21:41,531 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,532 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,534 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,535 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:41,541 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName A 2024-12-09T17:21:41,542 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:41,543 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:41,543 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,544 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:41,544 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName B 2024-12-09T17:21:41,544 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:41,545 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:41,545 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,546 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:21:41,546 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e312d1737c5c0e923e8e7c9efe02a2 columnFamilyName C 2024-12-09T17:21:41,546 DEBUG [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:41,546 INFO [StoreOpener-20e312d1737c5c0e923e8e7c9efe02a2-1 {}] regionserver.HStore(327): Store=20e312d1737c5c0e923e8e7c9efe02a2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:21:41,547 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,547 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,549 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,550 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:21:41,552 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,553 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 20e312d1737c5c0e923e8e7c9efe02a2; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64467370, jitterRate=-0.03936132788658142}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:21:41,554 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:41,554 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., pid=48, masterSystemTime=1733764901527 2024-12-09T17:21:41,556 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,556 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:41,557 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=OPEN, openSeqNum=5, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-09T17:21:41,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 in 184 msec 2024-12-09T17:21:41,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-09T17:21:41,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, REOPEN/MOVE in 511 msec 2024-12-09T17:21:41,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-09T17:21:41,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 524 msec 2024-12-09T17:21:41,566 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 950 msec 2024-12-09T17:21:41,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-12-09T17:21:41,574 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7435a904 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a0aa7d7 2024-12-09T17:21:41,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@87b269f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d006bed to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@231f064 2024-12-09T17:21:41,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a7bf7fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b8e1501 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28c904d8 2024-12-09T17:21:41,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15736fcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70b41629 to 
127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d6eb994 2024-12-09T17:21:41,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24ebde20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a0312cf to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a63fed4 2024-12-09T17:21:41,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40832d66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,694 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x706b2cde to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29dad7a8 2024-12-09T17:21:41,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ec46f90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,702 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d5e0e3f to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62c6fdab 2024-12-09T17:21:41,709 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f63b68c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,710 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70f48df4 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@250a1de4 2024-12-09T17:21:41,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473f181f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cd5be36 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49456175 2024-12-09T17:21:41,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768577a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:21:41,738 DEBUG 
[hconnection-0x2d0dff3f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,738 DEBUG [hconnection-0x612c3cc7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,738 DEBUG [hconnection-0x34ae5286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,738 DEBUG [hconnection-0x14bed04e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,739 DEBUG [hconnection-0x684dd52d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,740 DEBUG [hconnection-0x61a943e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,740 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,740 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41178, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:41,740 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,741 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,741 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-09T17:21:41,742 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,743 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:41,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-09T17:21:41,744 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:41,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:41,744 DEBUG [hconnection-0x1b7e183d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,744 DEBUG [hconnection-0x60b41400-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,746 DEBUG [hconnection-0x79bfd5bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:21:41,746 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,747 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,749 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41244, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:21:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:41,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764961778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764961779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764961780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764961783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764961783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-09T17:21:41,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120923c9e9df32af43d996e8798b9fc5c357_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764901752/Put/seqid=0 2024-12-09T17:21:41,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741962_1138 (size=12154) 2024-12-09T17:21:41,863 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:41,870 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120923c9e9df32af43d996e8798b9fc5c357_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120923c9e9df32af43d996e8798b9fc5c357_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:41,872 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c666380445de45c89cf2b0f025761d47, store: [table=TestAcidGuarantees 
family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:41,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c666380445de45c89cf2b0f025761d47 is 175, key is test_row_0/A:col10/1733764901752/Put/seqid=0 2024-12-09T17:21:41,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764961884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764961885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764961884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764961885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:41,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764961885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,897 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:41,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-09T17:21:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:41,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:41,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:41,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:41,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741963_1139 (size=30955) 2024-12-09T17:21:41,921 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c666380445de45c89cf2b0f025761d47 2024-12-09T17:21:41,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf95aac0ad31444e849776e6f9d6cd75 is 50, key is test_row_0/B:col10/1733764901752/Put/seqid=0 2024-12-09T17:21:41,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741964_1140 (size=12001) 2024-12-09T17:21:41,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf95aac0ad31444e849776e6f9d6cd75 2024-12-09T17:21:42,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/be985125491f4fffb330c2bf3129be05 is 50, key is test_row_0/C:col10/1733764901752/Put/seqid=0 2024-12-09T17:21:42,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-09T17:21:42,052 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-09T17:21:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
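[Editor's note] The repeated "NOT flushing ... as already flushing" / "Unable to complete flush" errors and the master's "Remote procedure failed, pid=50" entries above are the table-flush procedure (pid=49, with per-region subprocedure pid=50) colliding with the flush that the MemStoreFlusher already started under memstore pressure; the master keeps re-dispatching FlushRegionCallable until the in-progress flush drains, after which pid=50 completes. A minimal sketch of how such a table flush is typically requested through the HBase Admin API follows; this is an assumption for illustration only, since the excerpt does not show how the test actually triggers the flush, and the table name is taken from the log.

// Illustrative sketch (assumed, not taken from the test code): requesting a
// table flush like the FlushTableProcedure seen as pid=49 in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table. The master runs a
      // flush procedure and dispatches a FlushRegionCallable to each region
      // server; if a region is already flushing, the remote call fails and is
      // retried, which is what the repeated pid=50 errors in this log reflect.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}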
2024-12-09T17:21:42,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:42,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741965_1141 (size=12001) 2024-12-09T17:21:42,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/be985125491f4fffb330c2bf3129be05 2024-12-09T17:21:42,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c666380445de45c89cf2b0f025761d47 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47 2024-12-09T17:21:42,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47, entries=150, sequenceid=18, filesize=30.2 K 2024-12-09T17:21:42,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf95aac0ad31444e849776e6f9d6cd75 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75 2024-12-09T17:21:42,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75, entries=150, sequenceid=18, filesize=11.7 K 2024-12-09T17:21:42,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/be985125491f4fffb330c2bf3129be05 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05 2024-12-09T17:21:42,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764962089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764962089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764962090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764962090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764962090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05, entries=150, sequenceid=18, filesize=11.7 K 2024-12-09T17:21:42,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 20e312d1737c5c0e923e8e7c9efe02a2 in 372ms, sequenceid=18, compaction requested=false 2024-12-09T17:21:42,131 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-09T17:21:42,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:42,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-09T17:21:42,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
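[Editor's note] The bursts of RegionTooBusyException above come from HRegion.checkResources rejecting incoming puts once the region's memstore passes its blocking limit (512.0 K here) while the flush is still draining; writers are expected to back off and retry until the "Finished flush" entry frees the memstore. Below is a minimal client-side sketch of that back-off, assuming the application actually sees the failure (the stock HBase client normally retries these internally, so this presumes retries are tuned down); the row key, family, and qualifier are taken from the log, the value and retry counts are assumptions.

// Illustrative sketch (assumptions noted above): writing to the busy region
// with manual back-off when the put is rejected.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Depending on client retry settings, the server-side
          // RegionTooBusyException may surface directly or wrapped in a
          // retries-exhausted exception; either way, back off and retry
          // while the flush drains the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}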
2024-12-09T17:21:42,206 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:21:42,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:42,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120950ef158cb6c54345a5d9c6a29934cecd_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764901776/Put/seqid=0 2024-12-09T17:21:42,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741966_1142 (size=12154) 2024-12-09T17:21:42,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,256 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120950ef158cb6c54345a5d9c6a29934cecd_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120950ef158cb6c54345a5d9c6a29934cecd_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:42,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/df80a7069ab3496aabe3c362c1e94a96, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:42,259 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/df80a7069ab3496aabe3c362c1e94a96 is 175, key is test_row_0/A:col10/1733764901776/Put/seqid=0 2024-12-09T17:21:42,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741967_1143 (size=30955) 2024-12-09T17:21:42,280 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/df80a7069ab3496aabe3c362c1e94a96 2024-12-09T17:21:42,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0a5fadb73e234b5f9976e5100275cfaf is 50, key is test_row_0/B:col10/1733764901776/Put/seqid=0 2024-12-09T17:21:42,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741968_1144 (size=12001) 2024-12-09T17:21:42,324 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0a5fadb73e234b5f9976e5100275cfaf 2024-12-09T17:21:42,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/ac9db662380b4bf390f0a332faeae2cb is 50, key is test_row_0/C:col10/1733764901776/Put/seqid=0 2024-12-09T17:21:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-09T17:21:42,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741969_1145 (size=12001) 2024-12-09T17:21:42,385 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/ac9db662380b4bf390f0a332faeae2cb 2024-12-09T17:21:42,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/df80a7069ab3496aabe3c362c1e94a96 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96 2024-12-09T17:21:42,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:42,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:42,397 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96, entries=150, sequenceid=42, filesize=30.2 K 2024-12-09T17:21:42,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0a5fadb73e234b5f9976e5100275cfaf as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf 2024-12-09T17:21:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,405 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf, entries=150, sequenceid=42, filesize=11.7 K 2024-12-09T17:21:42,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/ac9db662380b4bf390f0a332faeae2cb as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb 2024-12-09T17:21:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,412 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb, entries=150, sequenceid=42, filesize=11.7 K 2024-12-09T17:21:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,413 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=13.42 KB/13740 for 20e312d1737c5c0e923e8e7c9efe02a2 in 207ms, sequenceid=42, compaction requested=false 2024-12-09T17:21:42,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-09T17:21:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-09T17:21:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-09T17:21:42,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 671 msec 2024-12-09T17:21:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 677 msec 2024-12-09T17:21:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,422 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:42,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:21:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:42,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209d8af33cf2dab4824b3d472c95d45a8e7_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is 
test_row_0/A:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:42,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764962518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764962519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764962521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764962521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764962521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741971_1147 (size=26798) 2024-12-09T17:21:42,533 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:42,538 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209d8af33cf2dab4824b3d472c95d45a8e7_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209d8af33cf2dab4824b3d472c95d45a8e7_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:42,541 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/8518b7abc64b4127ab1175cf7248a7a6, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:42,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/8518b7abc64b4127ab1175cf7248a7a6 is 175, key is test_row_0/A:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:42,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741970_1146 (size=82585) 2024-12-09T17:21:42,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764962623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764962624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764962626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764962627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764962627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764962826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764962826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764962830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764962836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764962841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-09T17:21:42,853 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-09T17:21:42,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-09T17:21:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-09T17:21:42,858 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:42,859 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:42,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:42,864 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T17:21:42,865 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T17:21:42,946 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/8518b7abc64b4127ab1175cf7248a7a6 2024-12-09T17:21:42,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=51 2024-12-09T17:21:42,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/518b36c7c7eb4e32b9fea3667f569093 is 50, key is test_row_0/B:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:42,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741972_1148 (size=12001) 2024-12-09T17:21:42,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/518b36c7c7eb4e32b9fea3667f569093 2024-12-09T17:21:43,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:43,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:43,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9145093d25d346efa4eb4b9a29cc5b9e is 50, key is test_row_0/C:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:43,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741973_1149 (size=12001) 2024-12-09T17:21:43,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9145093d25d346efa4eb4b9a29cc5b9e 2024-12-09T17:21:43,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/8518b7abc64b4127ab1175cf7248a7a6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6 2024-12-09T17:21:43,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6, entries=450, sequenceid=54, filesize=80.6 K 2024-12-09T17:21:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/518b36c7c7eb4e32b9fea3667f569093 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093 2024-12-09T17:21:43,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093, entries=150, sequenceid=54, filesize=11.7 K 2024-12-09T17:21:43,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9145093d25d346efa4eb4b9a29cc5b9e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e 2024-12-09T17:21:43,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e, entries=150, sequenceid=54, filesize=11.7 K 2024-12-09T17:21:43,061 
INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 20e312d1737c5c0e923e8e7c9efe02a2 in 581ms, sequenceid=54, compaction requested=true 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:43,061 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:43,061 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:43,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:43,063 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 144495 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:43,063 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:43,063 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:43,063 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=141.1 K 2024-12-09T17:21:43,063 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,063 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6] 2024-12-09T17:21:43,064 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:43,064 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:43,064 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:43,064 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=35.2 K 2024-12-09T17:21:43,064 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c666380445de45c89cf2b0f025761d47, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733764901747 2024-12-09T17:21:43,066 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting df80a7069ab3496aabe3c362c1e94a96, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733764901776 2024-12-09T17:21:43,066 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting cf95aac0ad31444e849776e6f9d6cd75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733764901747 2024-12-09T17:21:43,067 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a5fadb73e234b5f9976e5100275cfaf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733764901776 2024-12-09T17:21:43,067 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8518b7abc64b4127ab1175cf7248a7a6, keycount=450, bloomtype=ROW, size=80.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902396 2024-12-09T17:21:43,069 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 518b36c7c7eb4e32b9fea3667f569093, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902466 2024-12-09T17:21:43,092 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,101 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#130 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:43,102 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/48be931eb1154851b877f6503baefb13 is 50, key is test_row_0/B:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:43,112 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412090c7baa66d5314fa0ab165949021efb16_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,118 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412090c7baa66d5314fa0ab165949021efb16_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,119 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090c7baa66d5314fa0ab165949021efb16_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741974_1150 (size=12104) 2024-12-09T17:21:43,132 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/48be931eb1154851b877f6503baefb13 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/48be931eb1154851b877f6503baefb13 2024-12-09T17:21:43,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:43,134 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:43,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,153 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741975_1151 (size=4469) 2024-12-09T17:21:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-09T17:21:43,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209bf25ccea32264229ab1ee3c0ad369c21_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764903133/Put/seqid=0 2024-12-09T17:21:43,163 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:21:43,164 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:43,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:43,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741976_1152 (size=17034) 2024-12-09T17:21:43,189 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 48be931eb1154851b877f6503baefb13(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:43,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:43,190 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764903061; duration=0sec 2024-12-09T17:21:43,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:43,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:21:43,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764963142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764963144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,192 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:43,193 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:21:43,193 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:43,193 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=35.2 K 2024-12-09T17:21:43,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764963191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764963191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,197 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting be985125491f4fffb330c2bf3129be05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733764901747 2024-12-09T17:21:43,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764963192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,197 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ac9db662380b4bf390f0a332faeae2cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733764901776 2024-12-09T17:21:43,198 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9145093d25d346efa4eb4b9a29cc5b9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902466 2024-12-09T17:21:43,212 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:43,212 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/3958d990216243479d6ee3a98511b109 is 50, key is test_row_0/C:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:43,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741977_1153 (size=12104) 2024-12-09T17:21:43,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764963293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764963297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764963298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764963299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,317 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:43,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:43,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-09T17:21:43,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:43,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764963497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764963500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764963503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764963504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,554 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#129 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:43,556 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/864bae374bff47ffb32d73f3c0eab616 is 175, key is test_row_0/A:col10/1733764902480/Put/seqid=0 2024-12-09T17:21:43,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741978_1154 (size=31058) 2024-12-09T17:21:43,578 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:43,585 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209bf25ccea32264229ab1ee3c0ad369c21_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209bf25ccea32264229ab1ee3c0ad369c21_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:43,589 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c52875f7ed184bfeb42e535f57a849ce, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c52875f7ed184bfeb42e535f57a849ce is 175, key is test_row_0/A:col10/1733764903133/Put/seqid=0 2024-12-09T17:21:43,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741979_1155 (size=48139) 2024-12-09T17:21:43,614 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c52875f7ed184bfeb42e535f57a849ce 2024-12-09T17:21:43,627 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/3958d990216243479d6ee3a98511b109 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/3958d990216243479d6ee3a98511b109 2024-12-09T17:21:43,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/f8fe7322be0b40cab9492682bfc097a2 is 50, key is test_row_0/B:col10/1733764903133/Put/seqid=0 2024-12-09T17:21:43,635 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 3958d990216243479d6ee3a98511b109(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:43,636 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:43,636 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764903061; duration=0sec 2024-12-09T17:21:43,636 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:43,636 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:43,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:43,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:43,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:43,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:43,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741980_1156 (size=12001) 2024-12-09T17:21:43,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/f8fe7322be0b40cab9492682bfc097a2 2024-12-09T17:21:43,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/61d920ca3f9e454ab062ab39688f7f9b is 50, key is test_row_0/C:col10/1733764903133/Put/seqid=0 2024-12-09T17:21:43,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764963692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741981_1157 (size=12001) 2024-12-09T17:21:43,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/61d920ca3f9e454ab062ab39688f7f9b 2024-12-09T17:21:43,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c52875f7ed184bfeb42e535f57a849ce as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce 2024-12-09T17:21:43,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce, entries=250, sequenceid=82, filesize=47.0 K 2024-12-09T17:21:43,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/f8fe7322be0b40cab9492682bfc097a2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2 2024-12-09T17:21:43,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2, entries=150, sequenceid=82, filesize=11.7 K 2024-12-09T17:21:43,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/61d920ca3f9e454ab062ab39688f7f9b as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b 2024-12-09T17:21:43,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b, entries=150, sequenceid=82, filesize=11.7 K 2024-12-09T17:21:43,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 20e312d1737c5c0e923e8e7c9efe02a2 in 608ms, sequenceid=82, compaction requested=false 2024-12-09T17:21:43,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:43,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-09T17:21:43,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:43,797 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-09T17:21:43,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:43,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:43,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:43,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:43,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
as already flushing 2024-12-09T17:21:43,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090502809e7181488b81a790b363594a5c_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764903146/Put/seqid=0 2024-12-09T17:21:43,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741982_1158 (size=12154) 2024-12-09T17:21:43,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:43,835 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090502809e7181488b81a790b363594a5c_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090502809e7181488b81a790b363594a5c_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:43,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/3850cac7da474afba7e6d2dab5d4000e, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:43,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/3850cac7da474afba7e6d2dab5d4000e is 175, key is test_row_0/A:col10/1733764903146/Put/seqid=0 2024-12-09T17:21:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741983_1159 (size=30955) 2024-12-09T17:21:43,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764963836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764963841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764963844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764963844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764963945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764963945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764963948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:43,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764963949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:43,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-09T17:21:43,982 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/864bae374bff47ffb32d73f3c0eab616 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616 2024-12-09T17:21:43,987 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into 864bae374bff47ffb32d73f3c0eab616(size=30.3 K), total size for store is 77.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:43,987 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:43,987 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764903061; duration=0sec 2024-12-09T17:21:43,987 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:43,987 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:44,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764964149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764964150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764964151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764964152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,243 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/3850cac7da474afba7e6d2dab5d4000e 2024-12-09T17:21:44,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/37a5327b354b488e81463abe560caae2 is 50, key is test_row_0/B:col10/1733764903146/Put/seqid=0 2024-12-09T17:21:44,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741984_1160 (size=12001) 2024-12-09T17:21:44,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764964452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764964454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764964459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764964459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,656 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/37a5327b354b488e81463abe560caae2 2024-12-09T17:21:44,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8bec2c9f721f42f2ba68b21accae8561 is 50, key is test_row_0/C:col10/1733764903146/Put/seqid=0 2024-12-09T17:21:44,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741985_1161 (size=12001) 2024-12-09T17:21:44,669 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8bec2c9f721f42f2ba68b21accae8561 2024-12-09T17:21:44,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/3850cac7da474afba7e6d2dab5d4000e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e 2024-12-09T17:21:44,681 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e, entries=150, sequenceid=93, filesize=30.2 K 2024-12-09T17:21:44,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/37a5327b354b488e81463abe560caae2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2 2024-12-09T17:21:44,690 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2, entries=150, sequenceid=93, filesize=11.7 K 2024-12-09T17:21:44,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8bec2c9f721f42f2ba68b21accae8561 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561 2024-12-09T17:21:44,696 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561, entries=150, sequenceid=93, filesize=11.7 K 2024-12-09T17:21:44,698 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 20e312d1737c5c0e923e8e7c9efe02a2 in 900ms, sequenceid=93, compaction requested=true 2024-12-09T17:21:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:44,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-09T17:21:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-09T17:21:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:44,703 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-09T17:21:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:44,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:44,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:44,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:44,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:44,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:44,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-09T17:21:44,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8400 sec 2024-12-09T17:21:44,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.8550 sec 2024-12-09T17:21:44,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412099c933828c1444f32889743fe2fd15484_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:44,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741986_1162 (size=14594) 2024-12-09T17:21:44,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764964734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764964836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764964959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,961 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764964960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-09T17:21:44,962 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-09T17:21:44,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-09T17:21:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:44,964 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:44,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764964963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:44,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764964964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:44,965 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:44,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:45,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764965040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:45,116 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-09T17:21:45,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:45,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,129 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,135 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412099c933828c1444f32889743fe2fd15484_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412099c933828c1444f32889743fe2fd15484_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:45,136 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f58cc3e0982f4856912375cd5d891a84, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:45,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f58cc3e0982f4856912375cd5d891a84 is 175, key is test_row_0/A:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:45,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741987_1163 (size=39549) 2024-12-09T17:21:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:45,269 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-09T17:21:45,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:45,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764965344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-09T17:21:45,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:45,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:45,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,551 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f58cc3e0982f4856912375cd5d891a84 2024-12-09T17:21:45,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/192fdbbb539f4249b52014632e2d1bc4 is 50, key is test_row_0/B:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:45,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741988_1164 (size=12001) 2024-12-09T17:21:45,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/192fdbbb539f4249b52014632e2d1bc4 2024-12-09T17:21:45,574 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-09T17:21:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:45,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:45,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/e3c2d994321e4a0faf43c6dcdc6d7301 is 50, key is test_row_0/C:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:45,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741989_1165 (size=12001) 2024-12-09T17:21:45,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/e3c2d994321e4a0faf43c6dcdc6d7301 2024-12-09T17:21:45,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f58cc3e0982f4856912375cd5d891a84 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84 2024-12-09T17:21:45,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84, entries=200, sequenceid=123, filesize=38.6 K 2024-12-09T17:21:45,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/192fdbbb539f4249b52014632e2d1bc4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4 2024-12-09T17:21:45,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4, entries=150, sequenceid=123, filesize=11.7 K 2024-12-09T17:21:45,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/e3c2d994321e4a0faf43c6dcdc6d7301 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301 2024-12-09T17:21:45,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301, entries=150, sequenceid=123, filesize=11.7 K 2024-12-09T17:21:45,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 20e312d1737c5c0e923e8e7c9efe02a2 in 910ms, sequenceid=123, compaction requested=true 
2024-12-09T17:21:45,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:45,613 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:45,615 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:45,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:45,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149701 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,616 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:45,616 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=146.2 K 2024-12-09T17:21:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,616 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84] 2024-12-09T17:21:45,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,618 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 864bae374bff47ffb32d73f3c0eab616, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902466 2024-12-09T17:21:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,619 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c52875f7ed184bfeb42e535f57a849ce, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733764902512 2024-12-09T17:21:45,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,619 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:45,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,619 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:45,619 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,619 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/48be931eb1154851b877f6503baefb13, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=47.0 K 2024-12-09T17:21:45,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,619 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3850cac7da474afba7e6d2dab5d4000e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764903139 2024-12-09T17:21:45,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,620 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 48be931eb1154851b877f6503baefb13, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902466 2024-12-09T17:21:45,620 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f58cc3e0982f4856912375cd5d891a84, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839 2024-12-09T17:21:45,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,620 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f8fe7322be0b40cab9492682bfc097a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733764902519 2024-12-09T17:21:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,623 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 37a5327b354b488e81463abe560caae2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764903139 
2024-12-09T17:21:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,623 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 192fdbbb539f4249b52014632e2d1bc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839 2024-12-09T17:21:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,635 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:45,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,637 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:45,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,638 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/72eac906d76d4278a2d5953c7a957e5c is 50, key is test_row_0/B:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,641 DEBUG 
2024-12-09T17:21:45,652 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120905728fed9f2f45bb8784eb8a66c33f05_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2]
2024-12-09T17:21:45,656 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120905728fed9f2f45bb8784eb8a66c33f05_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2]
2024-12-09T17:21:45,657 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120905728fed9f2f45bb8784eb8a66c33f05_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2]
2024-12-09T17:21:45,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741990_1166 (size=12241)
2024-12-09T17:21:45,669 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/72eac906d76d4278a2d5953c7a957e5c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/72eac906d76d4278a2d5953c7a957e5c
2024-12-09T17:21:45,682 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 72eac906d76d4278a2d5953c7a957e5c(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T17:21:45,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2:
2024-12-09T17:21:45,682 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=12, startTime=1733764905615; duration=0sec
2024-12-09T17:21:45,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-09T17:21:45,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B
2024-12-09T17:21:45,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-09T17:21:45,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741991_1167 (size=4469)
2024-12-09T17:21:45,687 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#141 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T17:21:45,687 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-09T17:21:45,688 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files)
2024-12-09T17:21:45,688 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.
2024-12-09T17:21:45,688 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/3958d990216243479d6ee3a98511b109, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=47.0 K
2024-12-09T17:21:45,688 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b53a8762f48343179f29ed264e52f1d5 is 175, key is test_row_0/A:col10/1733764903839/Put/seqid=0
2024-12-09T17:21:45,689 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3958d990216243479d6ee3a98511b109, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764902466
2024-12-09T17:21:45,689 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 61d920ca3f9e454ab062ab39688f7f9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733764902519
2024-12-09T17:21:45,690 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bec2c9f721f42f2ba68b21accae8561, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764903139
2024-12-09T17:21:45,691 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e3c2d994321e4a0faf43c6dcdc6d7301, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839
2024-12-09T17:21:45,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741992_1168 (size=31195)
2024-12-09T17:21:45,717 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b53a8762f48343179f29ed264e52f1d5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,725 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into b53a8762f48343179f29ed264e52f1d5(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:45,725 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:45,725 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=12, startTime=1733764905613; duration=0sec 2024-12-09T17:21:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,725 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#143 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:45,725 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:45,725 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,726 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/288caa3f02ae44eda9d882979bf3daa4 is 50, key is test_row_0/C:col10/1733764903839/Put/seqid=0 2024-12-09T17:21:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,728 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-09T17:21:45,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,729 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-09T17:21:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:45,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,730 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,733 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741993_1169 (size=12241) 2024-12-09T17:21:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,740 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/288caa3f02ae44eda9d882979bf3daa4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/288caa3f02ae44eda9d882979bf3daa4 2024-12-09T17:21:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,749 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 288caa3f02ae44eda9d882979bf3daa4(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:45,749 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,749 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=12, startTime=1733764905615; duration=0sec 2024-12-09T17:21:45,749 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:45,749 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:45,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090b69749fe20148d3b7560ccc23bddf36_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_1/A:col10/1733764904732/Put/seqid=0 2024-12-09T17:21:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741994_1170 (size=9764) 2024-12-09T17:21:45,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:45,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:45,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764965936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764965966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764965970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764965973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:45,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764965993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:46,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:46,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764966041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:46,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:46,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:46,175 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090b69749fe20148d3b7560ccc23bddf36_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090b69749fe20148d3b7560ccc23bddf36_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:46,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/27ba081e176e48ebaf3a3ab146f8380a, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:46,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/27ba081e176e48ebaf3a3ab146f8380a is 175, key is test_row_1/A:col10/1733764904732/Put/seqid=0 2024-12-09T17:21:46,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741995_1171 (size=22411) 2024-12-09T17:21:46,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:46,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764966243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:46,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:46,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764966547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:46,581 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/27ba081e176e48ebaf3a3ab146f8380a 2024-12-09T17:21:46,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/82dc7272891b4b2798836e5aea9a735f is 50, key is test_row_1/B:col10/1733764904732/Put/seqid=0 2024-12-09T17:21:46,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741996_1172 (size=9707) 2024-12-09T17:21:46,628 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/82dc7272891b4b2798836e5aea9a735f 2024-12-09T17:21:46,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d0f76371409249d0ac2060a78971f32d is 50, key is test_row_1/C:col10/1733764904732/Put/seqid=0 2024-12-09T17:21:46,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741997_1173 (size=9707) 2024-12-09T17:21:46,674 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d0f76371409249d0ac2060a78971f32d 2024-12-09T17:21:46,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/27ba081e176e48ebaf3a3ab146f8380a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a 2024-12-09T17:21:46,689 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a, entries=100, sequenceid=132, filesize=21.9 K 2024-12-09T17:21:46,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/82dc7272891b4b2798836e5aea9a735f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f 2024-12-09T17:21:46,697 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f, entries=100, sequenceid=132, filesize=9.5 K 2024-12-09T17:21:46,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d0f76371409249d0ac2060a78971f32d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d 2024-12-09T17:21:46,706 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d, entries=100, sequenceid=132, filesize=9.5 K 2024-12-09T17:21:46,708 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 20e312d1737c5c0e923e8e7c9efe02a2 in 978ms, sequenceid=132, compaction requested=false 2024-12-09T17:21:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:46,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-09T17:21:46,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-09T17:21:46,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-09T17:21:46,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7440 sec 2024-12-09T17:21:46,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.7470 sec 2024-12-09T17:21:47,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:47,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cf8c66971da74fe3b4fff4abf5dbfe87_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764967067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-09T17:21:47,072 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-09T17:21:47,073 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-09T17:21:47,075 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:47,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-09T17:21:47,078 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:47,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:47,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741998_1174 (size=12304) 2024-12-09T17:21:47,092 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:47,096 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cf8c66971da74fe3b4fff4abf5dbfe87_20e312d1737c5c0e923e8e7c9efe02a2 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cf8c66971da74fe3b4fff4abf5dbfe87_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:47,098 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6a0b0bca6c6e444cb361e27f773b496e, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6a0b0bca6c6e444cb361e27f773b496e is 175, key is test_row_0/A:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741999_1175 (size=31105) 2024-12-09T17:21:47,108 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=163, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6a0b0bca6c6e444cb361e27f773b496e 2024-12-09T17:21:47,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/9ca1d83617f44f95aa460ff762bc3dbd is 50, key is test_row_0/B:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742000_1176 (size=12151) 2024-12-09T17:21:47,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/9ca1d83617f44f95aa460ff762bc3dbd 2024-12-09T17:21:47,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/1790990fbedd445c9c30298fded2c5d0 is 50, key is test_row_0/C:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742001_1177 (size=12151) 2024-12-09T17:21:47,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/1790990fbedd445c9c30298fded2c5d0 2024-12-09T17:21:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6a0b0bca6c6e444cb361e27f773b496e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e 2024-12-09T17:21:47,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e, entries=150, sequenceid=163, filesize=30.4 K 2024-12-09T17:21:47,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/9ca1d83617f44f95aa460ff762bc3dbd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd 2024-12-09T17:21:47,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd, entries=150, sequenceid=163, filesize=11.9 K 2024-12-09T17:21:47,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/1790990fbedd445c9c30298fded2c5d0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0 2024-12-09T17:21:47,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764967171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-09T17:21:47,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0, entries=150, sequenceid=163, filesize=11.9 K 2024-12-09T17:21:47,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 20e312d1737c5c0e923e8e7c9efe02a2 in 132ms, sequenceid=163, compaction requested=true 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:47,187 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:47,187 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:47,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 
2024-12-09T17:21:47,189 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:47,189 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:47,189 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:47,189 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/72eac906d76d4278a2d5953c7a957e5c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=33.3 K 2024-12-09T17:21:47,189 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:47,190 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:47,190 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:47,190 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=82.7 K 2024-12-09T17:21:47,190 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:47,190 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e] 2024-12-09T17:21:47,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 72eac906d76d4278a2d5953c7a957e5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839 2024-12-09T17:21:47,191 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b53a8762f48343179f29ed264e52f1d5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839 2024-12-09T17:21:47,191 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 82dc7272891b4b2798836e5aea9a735f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764904705 2024-12-09T17:21:47,192 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27ba081e176e48ebaf3a3ab146f8380a, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764904705 2024-12-09T17:21:47,192 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ca1d83617f44f95aa460ff762bc3dbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:47,192 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a0b0bca6c6e444cb361e27f773b496e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:47,201 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:47,202 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/dd297821cdde4474bd0355499adcdbb3 is 50, key is test_row_0/B:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,208 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209375faa2ab5bb4c81a5f3fc1ce22d4e3d_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,209 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209375faa2ab5bb4c81a5f3fc1ce22d4e3d_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,209 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209375faa2ab5bb4c81a5f3fc1ce22d4e3d_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742003_1179 (size=4469) 2024-12-09T17:21:47,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-09T17:21:47,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742002_1178 (size=12493) 2024-12-09T17:21:47,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:47,235 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:47,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:47,238 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#151 average throughput is 0.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:47,238 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/829bdefcf75c403f933dd440f1c3e80f is 175, key is test_row_0/A:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,242 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/dd297821cdde4474bd0355499adcdbb3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/dd297821cdde4474bd0355499adcdbb3 2024-12-09T17:21:47,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742004_1180 (size=31447) 2024-12-09T17:21:47,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ea223903aa7542cdacb6797493bd1adf_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764907062/Put/seqid=0 2024-12-09T17:21:47,250 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into dd297821cdde4474bd0355499adcdbb3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:47,250 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:47,250 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764907187; duration=0sec 2024-12-09T17:21:47,250 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:47,251 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:21:47,251 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:47,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742005_1181 (size=12304) 2024-12-09T17:21:47,253 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:47,253 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:21:47,253 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:47,253 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/288caa3f02ae44eda9d882979bf3daa4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=33.3 K 2024-12-09T17:21:47,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 288caa3f02ae44eda9d882979bf3daa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733764903839 2024-12-09T17:21:47,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d0f76371409249d0ac2060a78971f32d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764904705 2024-12-09T17:21:47,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1790990fbedd445c9c30298fded2c5d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:47,263 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:47,263 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/6e2c62eba4b44653a505d5a8b2aeca84 is 50, key is test_row_0/C:col10/1733764905932/Put/seqid=0 2024-12-09T17:21:47,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742006_1182 (size=12493) 2024-12-09T17:21:47,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-09T17:21:47,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:47,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:47,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764967438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764967542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:47,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/829bdefcf75c403f933dd440f1c3e80f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f 2024-12-09T17:21:47,659 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ea223903aa7542cdacb6797493bd1adf_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea223903aa7542cdacb6797493bd1adf_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:47,661 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into 829bdefcf75c403f933dd440f1c3e80f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:47,661 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:47,661 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764907187; duration=0sec 2024-12-09T17:21:47,661 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:47,661 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:47,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/4b2b927bb98d423498d0d87f2909e08c, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:47,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/4b2b927bb98d423498d0d87f2909e08c is 175, key is test_row_0/A:col10/1733764907062/Put/seqid=0 2024-12-09T17:21:47,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742007_1183 (size=31105) 2024-12-09T17:21:47,676 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/6e2c62eba4b44653a505d5a8b2aeca84 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6e2c62eba4b44653a505d5a8b2aeca84 2024-12-09T17:21:47,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-09T17:21:47,682 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 6e2c62eba4b44653a505d5a8b2aeca84(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:47,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:47,682 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764907187; duration=0sec 2024-12-09T17:21:47,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:47,682 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:47,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764967745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764967971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,973 DEBUG [Thread-672 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at 
org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:47,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764967973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,974 DEBUG [Thread-670 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, 
server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:47,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764967983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:47,984 DEBUG [Thread-676 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:48,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:48,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764968004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:48,005 DEBUG [Thread-678 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:48,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:48,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764968048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:48,075 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/4b2b927bb98d423498d0d87f2909e08c 2024-12-09T17:21:48,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/a9369409e70e40fe88bcbca7b22a34ef is 50, key is test_row_0/B:col10/1733764907062/Put/seqid=0 2024-12-09T17:21:48,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742008_1184 (size=12151) 2024-12-09T17:21:48,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-09T17:21:48,487 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/a9369409e70e40fe88bcbca7b22a34ef 2024-12-09T17:21:48,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/7e1bc1334baf4986841cf63f86048e05 is 50, key is test_row_0/C:col10/1733764907062/Put/seqid=0 2024-12-09T17:21:48,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742009_1185 (size=12151) 2024-12-09T17:21:48,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764968551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:48,898 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/7e1bc1334baf4986841cf63f86048e05 2024-12-09T17:21:48,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/4b2b927bb98d423498d0d87f2909e08c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c 2024-12-09T17:21:48,906 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c, entries=150, sequenceid=169, filesize=30.4 K 2024-12-09T17:21:48,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/a9369409e70e40fe88bcbca7b22a34ef as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef 2024-12-09T17:21:48,911 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef, entries=150, sequenceid=169, filesize=11.9 K 2024-12-09T17:21:48,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/7e1bc1334baf4986841cf63f86048e05 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05 2024-12-09T17:21:48,917 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05, entries=150, sequenceid=169, filesize=11.9 K 2024-12-09T17:21:48,919 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1683ms, sequenceid=169, compaction requested=false 2024-12-09T17:21:48,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:48,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
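The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once a region's memstore passes its blocking limit; that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K figure in this run presumably reflects a test configuration with a much smaller flush size than the 128 MB default. A minimal sketch of that arithmetic follows; the class name and the 128 K setting are illustrative assumptions, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: how a 512 K per-region blocking memstore limit can arise
// from configuration. The values below are assumptions for this example, not
// settings taken from the test run.
public final class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed 128 K flush size
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                     // 512 K with the values above
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}

Until a flush like the one logged above brings the memstore back under that threshold, each Mutate call is rejected and the client's RpcRetryingCallerImpl keeps retrying (tries=6 of retries=16 in the entries above).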
2024-12-09T17:21:48,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56
2024-12-09T17:21:48,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=56
2024-12-09T17:21:48,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55
2024-12-09T17:21:48,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8420 sec
2024-12-09T17:21:48,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.8490 sec
2024-12-09T17:21:49,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-12-09T17:21:49,181 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed
2024-12-09T17:21:49,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-09T17:21:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees
2024-12-09T17:21:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-12-09T17:21:49,183 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-09T17:21:49,183 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T17:21:49,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T17:21:49,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-12-09T17:21:49,335 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379
2024-12-09T17:21:49,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58
2024-12-09T17:21:49,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.
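The flush that just finished (pid=55/56) and the one being scheduled next (pid=57/58) are both driven by the master's FlushTableProcedure, which the client requests and then polls ("Checking to see if procedure is done") until HBaseAdmin reports the operation completed. A hedged sketch of what that client side typically looks like with the public Admin API; the connection setup and the class name are assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative client-side flush request: Admin.flush() asks the master to run a
// flush procedure for the table and returns once it has completed.
public final class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // blocks until the table flush is done
    }
  }
}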
2024-12-09T17:21:49,335 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:49,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:49,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209102ccd843f4e4a31980f2b7ee2aa46ba_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764907418/Put/seqid=0 2024-12-09T17:21:49,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742010_1186 (size=12304) 2024-12-09T17:21:49,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-09T17:21:49,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:49,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:49,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764969559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:49,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:21:49,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764969661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:21:49,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:49,755 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209102ccd843f4e4a31980f2b7ee2aa46ba_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209102ccd843f4e4a31980f2b7ee2aa46ba_20e312d1737c5c0e923e8e7c9efe02a2
2024-12-09T17:21:49,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/30c126bc23ef4ffb8e5257efe7c0dfd7, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2]
2024-12-09T17:21:49,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/30c126bc23ef4ffb8e5257efe7c0dfd7 is 175, key is test_row_0/A:col10/1733764907418/Put/seqid=0
2024-12-09T17:21:49,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742011_1187 (size=31105)
2024-12-09T17:21:49,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-12-09T17:21:49,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:21:49,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764969864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:21:50,161 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=202, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/30c126bc23ef4ffb8e5257efe7c0dfd7
2024-12-09T17:21:50,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/363cd6e5e34141e0ab33166d898e9165 is 50, key is test_row_0/B:col10/1733764907418/Put/seqid=0
2024-12-09T17:21:50,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:21:50,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764970171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:21:50,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742012_1188 (size=12151)
2024-12-09T17:21:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-12-09T17:21:50,576 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/363cd6e5e34141e0ab33166d898e9165
2024-12-09T17:21:50,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8424d3b59e024a4f936904348a866bfd is 50, key is test_row_0/C:col10/1733764907418/Put/seqid=0
2024-12-09T17:21:50,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742013_1189 (size=12151)
2024-12-09T17:21:50,587 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8424d3b59e024a4f936904348a866bfd
2024-12-09T17:21:50,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/30c126bc23ef4ffb8e5257efe7c0dfd7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7
2024-12-09T17:21:50,596 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7, entries=150, sequenceid=202, filesize=30.4 K
2024-12-09T17:21:50,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/363cd6e5e34141e0ab33166d898e9165 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165
2024-12-09T17:21:50,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,601 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165, entries=150, sequenceid=202, filesize=11.9 K
2024-12-09T17:21:50,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/8424d3b59e024a4f936904348a866bfd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd
2024-12-09T17:21:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,602 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,607 INFO 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd, entries=150, sequenceid=202, filesize=11.9 K
2024-12-09T17:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1273ms, sequenceid=202, compaction requested=true
2024-12-09T17:21:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2:
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.
2024-12-09T17:21:50,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
2024-12-09T17:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=58
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-12-09T17:21:50,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4270 sec
2024-12-09T17:21:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,612 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4300 sec
2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=42927) between 2024-12-09T17:21:50,637 and 2024-12-09T17:21:50,701 ...]
2024-12-09T17:21:50,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-09T17:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C
2024-12-09T17:21:50,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2
2024-12-09T17:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:21:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090e3c1ccb4b9a4f4caa06325c22f83ab0_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_1/A:col10/1733764910719/Put/seqid=0 2024-12-09T17:21:50,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742014_1190 (size=14794) 2024-12-09T17:21:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,754 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,755 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090e3c1ccb4b9a4f4caa06325c22f83ab0_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090e3c1ccb4b9a4f4caa06325c22f83ab0_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:50,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,757 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1b1ecd53734044768ec8a011e4154f4e, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:50,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1b1ecd53734044768ec8a011e4154f4e is 175, key is test_row_1/A:col10/1733764910719/Put/seqid=0 2024-12-09T17:21:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742015_1191 (size=39745) 2024-12-09T17:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,767 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1b1ecd53734044768ec8a011e4154f4e 2024-12-09T17:21:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,772 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/3d5f3a3f446f400ba955094acd46de6a is 50, key is test_row_1/B:col10/1733764910719/Put/seqid=0 2024-12-09T17:21:50,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742016_1192 (size=9757) 2024-12-09T17:21:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:21:50,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:50,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:50,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764970856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:50,953 INFO [master/80c69eb3c456:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T17:21:50,953 INFO [master/80c69eb3c456:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T17:21:50,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:50,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764970959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:51,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764971162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/3d5f3a3f446f400ba955094acd46de6a 2024-12-09T17:21:51,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/6234e0f70d3d4d5aa577c46765b6b912 is 50, key is test_row_1/C:col10/1733764910719/Put/seqid=0 2024-12-09T17:21:51,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742017_1193 (size=9757) 2024-12-09T17:21:51,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-09T17:21:51,286 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-09T17:21:51,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-09T17:21:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:51,288 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:51,289 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:51,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
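[editor's note] The repeated RegionTooBusyException WARN entries above come from HRegion.checkResources: a region rejects new mutations once its memstore passes a blocking limit derived from the flush size and the block multiplier (here the limit is 512.0 K, so the test is clearly running with a much smaller flush size than the defaults). A minimal, hypothetical Java sketch of the knobs involved, assuming the standard HBase 2.x configuration keys; the retry values are illustrative, not taken from this test:

// Sketch only: shows where the 512.0 K blocking limit in the log comes from.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A region blocks writes once its memstore exceeds flush.size * block.multiplier.
    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes blocked above ~" + (flushSize * multiplier) + " bytes per region");
    // Clients typically ride over the temporary blocking with more generous retry settings:
    conf.setInt("hbase.client.retries.number", 15);
    conf.setInt("hbase.client.pause", 100); // ms between retries (illustrative)
  }
}

Once the in-progress flush completes and the memstore drains below the limit, the blocked Mutate calls succeed again, which is why the WARNs stop later in the log.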
2024-12-09T17:21:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:51,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-09T17:21:51,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:51,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:51,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:51,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:51,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764971466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:51,592 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-09T17:21:51,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:51,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:51,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:51,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/6234e0f70d3d4d5aa577c46765b6b912 2024-12-09T17:21:51,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:51,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1b1ecd53734044768ec8a011e4154f4e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e 2024-12-09T17:21:51,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e, entries=200, sequenceid=213, filesize=38.8 K 2024-12-09T17:21:51,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/3d5f3a3f446f400ba955094acd46de6a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a 2024-12-09T17:21:51,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a, entries=100, sequenceid=213, filesize=9.5 K 2024-12-09T17:21:51,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/6234e0f70d3d4d5aa577c46765b6b912 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912 2024-12-09T17:21:51,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912, entries=100, sequenceid=213, filesize=9.5 K 2024-12-09T17:21:51,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 20e312d1737c5c0e923e8e7c9efe02a2 in 901ms, sequenceid=213, compaction requested=true 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:51,632 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:51,632 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:51,632 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:51,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:51,633 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133402 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:51,633 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:51,633 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:51,633 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:51,634 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,634 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:51,634 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=130.3 K 2024-12-09T17:21:51,634 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/dd297821cdde4474bd0355499adcdbb3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=45.5 K 2024-12-09T17:21:51,634 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e] 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting dd297821cdde4474bd0355499adcdbb3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 829bdefcf75c403f933dd440f1c3e80f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a9369409e70e40fe88bcbca7b22a34ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733764907057 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b2b927bb98d423498d0d87f2909e08c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733764907057 2024-12-09T17:21:51,634 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 363cd6e5e34141e0ab33166d898e9165, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733764907418 2024-12-09T17:21:51,635 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30c126bc23ef4ffb8e5257efe7c0dfd7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733764907418 2024-12-09T17:21:51,635 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d5f3a3f446f400ba955094acd46de6a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764910719 2024-12-09T17:21:51,635 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b1ecd53734044768ec8a011e4154f4e, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764910697 2024-12-09T17:21:51,641 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:51,655 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412095e545b0e12f24dfe9834af7a8db198cf_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:51,656 INFO 
[RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#163 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:51,656 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/561ff30311ad4a37b9a5c01909c8273a is 50, key is test_row_0/B:col10/1733764907418/Put/seqid=0 2024-12-09T17:21:51,657 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412095e545b0e12f24dfe9834af7a8db198cf_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:51,657 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412095e545b0e12f24dfe9834af7a8db198cf_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:51,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742018_1194 (size=12629) 2024-12-09T17:21:51,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742019_1195 (size=4469) 2024-12-09T17:21:51,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-09T17:21:51,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:51,751 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:21:51,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:51,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:51,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ea9ae4e57f5343768ec2b714eef64d21_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764910851/Put/seqid=0 2024-12-09T17:21:51,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742020_1196 (size=12304) 2024-12-09T17:21:51,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:51,774 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ea9ae4e57f5343768ec2b714eef64d21_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea9ae4e57f5343768ec2b714eef64d21_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:51,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/719723c60ad644ada36f23d691f5191f, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:51,775 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/719723c60ad644ada36f23d691f5191f is 175, key is test_row_0/A:col10/1733764910851/Put/seqid=0 2024-12-09T17:21:51,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742021_1197 (size=31105) 2024-12-09T17:21:51,781 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/719723c60ad644ada36f23d691f5191f 2024-12-09T17:21:51,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/13d2b035f6b34d169b71adbbec9a9a00 is 50, key is test_row_0/B:col10/1733764910851/Put/seqid=0 2024-12-09T17:21:51,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742022_1198 (size=12151) 2024-12-09T17:21:51,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:51,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:51,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:51,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764971988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764971988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41170 deadline: 1733764971996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:51,997 DEBUG [Thread-670 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:52,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41228 deadline: 1733764972016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,017 DEBUG [Thread-676 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:52,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41230 deadline: 1733764972040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,042 DEBUG [Thread-678 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:21:52,066 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#162 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:52,066 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/fcd5aa82a91f48f683d957a1ea9c4308 is 175, key is test_row_0/A:col10/1733764907418/Put/seqid=0 2024-12-09T17:21:52,066 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/561ff30311ad4a37b9a5c01909c8273a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/561ff30311ad4a37b9a5c01909c8273a 2024-12-09T17:21:52,072 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 561ff30311ad4a37b9a5c01909c8273a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:52,072 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:52,072 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=12, startTime=1733764911632; duration=0sec 2024-12-09T17:21:52,072 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:52,072 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:21:52,072 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:21:52,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742023_1199 (size=31690) 2024-12-09T17:21:52,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:21:52,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:21:52,074 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:52,074 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6e2c62eba4b44653a505d5a8b2aeca84, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=45.5 K 2024-12-09T17:21:52,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e2c62eba4b44653a505d5a8b2aeca84, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733764905926 2024-12-09T17:21:52,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e1bc1334baf4986841cf63f86048e05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733764907057 2024-12-09T17:21:52,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8424d3b59e024a4f936904348a866bfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733764907418 2024-12-09T17:21:52,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6234e0f70d3d4d5aa577c46765b6b912, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764910719 2024-12-09T17:21:52,083 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#166 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:52,084 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/97737a0e1f5e4175ad8cf15c50fc5c5e is 50, key is test_row_0/C:col10/1733764907418/Put/seqid=0 2024-12-09T17:21:52,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742024_1200 (size=12629) 2024-12-09T17:21:52,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764972090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764972090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,197 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/13d2b035f6b34d169b71adbbec9a9a00 2024-12-09T17:21:52,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f9bf452eb3cf4dae8e19a914ae7e9882 is 50, key is test_row_0/C:col10/1733764910851/Put/seqid=0 2024-12-09T17:21:52,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742025_1201 (size=12151) 2024-12-09T17:21:52,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764972291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764972291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:52,476 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/fcd5aa82a91f48f683d957a1ea9c4308 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308 2024-12-09T17:21:52,481 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into fcd5aa82a91f48f683d957a1ea9c4308(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:52,481 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:52,481 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=12, startTime=1733764911632; duration=0sec 2024-12-09T17:21:52,481 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:52,481 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:52,492 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/97737a0e1f5e4175ad8cf15c50fc5c5e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/97737a0e1f5e4175ad8cf15c50fc5c5e 2024-12-09T17:21:52,497 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 97737a0e1f5e4175ad8cf15c50fc5c5e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:52,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:52,497 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=12, startTime=1733764911632; duration=0sec 2024-12-09T17:21:52,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:52,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:52,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764972593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:52,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764972595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:52,609 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f9bf452eb3cf4dae8e19a914ae7e9882 2024-12-09T17:21:52,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/719723c60ad644ada36f23d691f5191f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f 2024-12-09T17:21:52,617 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f, entries=150, sequenceid=238, filesize=30.4 K 2024-12-09T17:21:52,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/13d2b035f6b34d169b71adbbec9a9a00 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00 2024-12-09T17:21:52,622 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00, entries=150, sequenceid=238, filesize=11.9 K 2024-12-09T17:21:52,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f9bf452eb3cf4dae8e19a914ae7e9882 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882 2024-12-09T17:21:52,626 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882, entries=150, sequenceid=238, filesize=11.9 K 2024-12-09T17:21:52,627 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 20e312d1737c5c0e923e8e7c9efe02a2 in 876ms, sequenceid=238, compaction requested=false 2024-12-09T17:21:52,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:52,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:52,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-09T17:21:52,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-09T17:21:52,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-09T17:21:52,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3390 sec 2024-12-09T17:21:52,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.3420 sec 2024-12-09T17:21:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:53,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:21:53,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:53,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:53,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:53,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:53,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:53,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:53,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120907834836cba649f28bc5c1516da523c2_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:53,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742026_1202 (size=12304) 2024-12-09T17:21:53,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764973131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764973132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764973235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764973235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-09T17:21:53,392 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-09T17:21:53,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-09T17:21:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:53,394 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:53,394 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:53,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:53,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764973438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764973439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:53,510 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:53,513 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120907834836cba649f28bc5c1516da523c2_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120907834836cba649f28bc5c1516da523c2_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:53,514 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/44b35cb71ec14770be33675f730c4199, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:53,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/44b35cb71ec14770be33675f730c4199 is 175, key is test_row_0/A:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:53,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742027_1203 (size=31105) 2024-12-09T17:21:53,545 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:53,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:53,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:53,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:53,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:53,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:53,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:53,698 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:53,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:53,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764973741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764973741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,850 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:53,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:53,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:53,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:53,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:53,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:53,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:53,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:53,919 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/44b35cb71ec14770be33675f730c4199 2024-12-09T17:21:53,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0c179ae2dddd4311a1b7ba032ffbaf88 is 50, key is test_row_0/B:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:53,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742028_1204 (size=12151) 2024-12-09T17:21:53,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:54,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:54,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:54,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:54,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,156 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:54,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:54,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:54,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764974244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:54,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764974244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,308 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:54,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:54,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0c179ae2dddd4311a1b7ba032ffbaf88 2024-12-09T17:21:54,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/865a8b8d3b11466fb982b7173d4946ab is 50, key is test_row_0/C:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:54,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742029_1205 (size=12151) 2024-12-09T17:21:54,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/865a8b8d3b11466fb982b7173d4946ab 2024-12-09T17:21:54,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/44b35cb71ec14770be33675f730c4199 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199 2024-12-09T17:21:54,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199, entries=150, sequenceid=253, filesize=30.4 K 2024-12-09T17:21:54,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/0c179ae2dddd4311a1b7ba032ffbaf88 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88 2024-12-09T17:21:54,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88, entries=150, sequenceid=253, filesize=11.9 K 2024-12-09T17:21:54,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/865a8b8d3b11466fb982b7173d4946ab as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab 2024-12-09T17:21:54,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab, entries=150, sequenceid=253, filesize=11.9 K 2024-12-09T17:21:54,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1264ms, sequenceid=253, compaction requested=true 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:54,361 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:54,361 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:54,362 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93900 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:54,362 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:54,362 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:54,363 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=91.7 K 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:54,363 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,363 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199] 2024-12-09T17:21:54,363 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/561ff30311ad4a37b9a5c01909c8273a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.1 K 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcd5aa82a91f48f683d957a1ea9c4308, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764907418 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 561ff30311ad4a37b9a5c01909c8273a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764907418 2024-12-09T17:21:54,363 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 719723c60ad644ada36f23d691f5191f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733764910833 2024-12-09T17:21:54,364 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 13d2b035f6b34d169b71adbbec9a9a00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733764910833 2024-12-09T17:21:54,364 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44b35cb71ec14770be33675f730c4199, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:54,364 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c179ae2dddd4311a1b7ba032ffbaf88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:54,373 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:54,374 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:54,375 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/1486b69fba1e426d8889d9c811e6a189 is 50, key is test_row_0/B:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:54,375 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209026d1fb290de48fb868548e22396c01f_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:54,377 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209026d1fb290de48fb868548e22396c01f_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:54,377 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209026d1fb290de48fb868548e22396c01f_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:54,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742030_1206 (size=12731) 2024-12-09T17:21:54,387 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/1486b69fba1e426d8889d9c811e6a189 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/1486b69fba1e426d8889d9c811e6a189 2024-12-09T17:21:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742031_1207 (size=4469) 2024-12-09T17:21:54,393 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#172 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:54,393 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 1486b69fba1e426d8889d9c811e6a189(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:54,393 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:54,393 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764914361; duration=0sec 2024-12-09T17:21:54,393 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6c66e65c8a604909961b1ab9df689d5d is 175, key is test_row_0/A:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:54,393 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:54,393 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:21:54,394 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:54,398 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:54,398 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:21:54,398 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:54,398 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/97737a0e1f5e4175ad8cf15c50fc5c5e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.1 K 2024-12-09T17:21:54,398 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 97737a0e1f5e4175ad8cf15c50fc5c5e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733764907418 2024-12-09T17:21:54,399 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f9bf452eb3cf4dae8e19a914ae7e9882, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733764910833 2024-12-09T17:21:54,399 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 865a8b8d3b11466fb982b7173d4946ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:54,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742032_1208 (size=31685) 2024-12-09T17:21:54,407 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/6c66e65c8a604909961b1ab9df689d5d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d 2024-12-09T17:21:54,408 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:54,408 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f8de663cafac4f69982b7789c59b6c48 is 50, key is test_row_0/C:col10/1733764913096/Put/seqid=0 2024-12-09T17:21:54,412 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into 6c66e65c8a604909961b1ab9df689d5d(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:54,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:54,412 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764914361; duration=0sec 2024-12-09T17:21:54,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:54,412 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:54,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742033_1209 (size=12731) 2024-12-09T17:21:54,418 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f8de663cafac4f69982b7789c59b6c48 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f8de663cafac4f69982b7789c59b6c48 2024-12-09T17:21:54,425 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into f8de663cafac4f69982b7789c59b6c48(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:54,425 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:54,425 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764914361; duration=0sec 2024-12-09T17:21:54,426 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:54,426 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:54,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:54,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-09T17:21:54,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
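The FlushRegionCallable above (pid=62) is the region-server side of the table flush the test client requested from the master (procedure pid=61, which appears further down as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed"). A client can trigger the same path with the standard Admin API; the sketch below is illustrative only, assumes a reachable cluster, and is not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the master to flush every region of the table; the master runs a
          // FlushTableProcedure that fans out FlushRegionProcedure subprocedures,
          // which is what shows up as pid=61/pid=62 in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }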
2024-12-09T17:21:54,461 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:54,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f753d588a150467386076b8aad6ba0dc_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764913130/Put/seqid=0 2024-12-09T17:21:54,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742034_1210 (size=12454) 2024-12-09T17:21:54,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:54,487 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f753d588a150467386076b8aad6ba0dc_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f753d588a150467386076b8aad6ba0dc_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:54,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/95fddf0ed8b444ce83e51182b2b95170, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:54,488 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/95fddf0ed8b444ce83e51182b2b95170 is 175, key is test_row_0/A:col10/1733764913130/Put/seqid=0 2024-12-09T17:21:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742035_1211 (size=31255) 2024-12-09T17:21:54,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:54,896 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/95fddf0ed8b444ce83e51182b2b95170 2024-12-09T17:21:54,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cc9e55fd2b214814851c3006c8c7d7c2 is 50, key is test_row_0/B:col10/1733764913130/Put/seqid=0 2024-12-09T17:21:54,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742036_1212 (size=12301) 2024-12-09T17:21:55,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:55,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:55,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764975257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764975259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,323 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cc9e55fd2b214814851c3006c8c7d7c2 2024-12-09T17:21:55,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9d5bfc637c80460a99a0298a500c94ad is 50, key is test_row_0/C:col10/1733764913130/Put/seqid=0 2024-12-09T17:21:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742037_1213 (size=12301) 2024-12-09T17:21:55,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764975360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764975360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:55,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764975562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764975563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,742 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9d5bfc637c80460a99a0298a500c94ad 2024-12-09T17:21:55,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/95fddf0ed8b444ce83e51182b2b95170 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170 2024-12-09T17:21:55,751 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170, entries=150, sequenceid=280, filesize=30.5 K 2024-12-09T17:21:55,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cc9e55fd2b214814851c3006c8c7d7c2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2 2024-12-09T17:21:55,757 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2, entries=150, sequenceid=280, filesize=12.0 K 2024-12-09T17:21:55,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9d5bfc637c80460a99a0298a500c94ad as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad 2024-12-09T17:21:55,763 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad, entries=150, sequenceid=280, filesize=12.0 K 2024-12-09T17:21:55,765 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1304ms, sequenceid=280, compaction requested=false 2024-12-09T17:21:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:55,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-09T17:21:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-09T17:21:55,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-09T17:21:55,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3730 sec 2024-12-09T17:21:55,769 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.3750 sec 2024-12-09T17:21:55,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:55,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:55,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:55,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c7dbd715f10d4a8e8217b8d7fe34faa2_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:55,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764975896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764975898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742038_1214 (size=12454) 2024-12-09T17:21:56,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764975999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764976000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764976203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764976203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,309 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:56,314 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c7dbd715f10d4a8e8217b8d7fe34faa2_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c7dbd715f10d4a8e8217b8d7fe34faa2_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:56,315 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1c23f25d3e244784ae7a8cab503b1c84, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:56,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1c23f25d3e244784ae7a8cab503b1c84 is 175, key is test_row_0/A:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:56,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742039_1215 (size=31255) 2024-12-09T17:21:56,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764976504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764976506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:56,719 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1c23f25d3e244784ae7a8cab503b1c84 2024-12-09T17:21:56,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/90baace7717c4ea1839ac08d66657f04 is 50, key is test_row_0/B:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:56,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742040_1216 (size=12301) 2024-12-09T17:21:57,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:57,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764977009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:57,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:57,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764977009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:57,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/90baace7717c4ea1839ac08d66657f04 2024-12-09T17:21:57,156 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9ea92ff87579482d87dd39f72a406ea1 is 50, key is test_row_0/C:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:57,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742041_1217 (size=12301) 2024-12-09T17:21:57,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9ea92ff87579482d87dd39f72a406ea1 2024-12-09T17:21:57,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/1c23f25d3e244784ae7a8cab503b1c84 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84 2024-12-09T17:21:57,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84, entries=150, sequenceid=294, filesize=30.5 K 2024-12-09T17:21:57,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/90baace7717c4ea1839ac08d66657f04 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04 2024-12-09T17:21:57,174 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04, entries=150, sequenceid=294, filesize=12.0 K 2024-12-09T17:21:57,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/9ea92ff87579482d87dd39f72a406ea1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1 2024-12-09T17:21:57,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1, entries=150, sequenceid=294, filesize=12.0 K 2024-12-09T17:21:57,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1311ms, sequenceid=294, compaction requested=true 2024-12-09T17:21:57,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:57,180 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:57,180 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:21:57,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:57,180 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:57,181 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 
20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:21:57,181 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:57,181 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=92.0 K 2024-12-09T17:21:57,181 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:57,181 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84] 2024-12-09T17:21:57,181 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:57,181 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:21:57,181 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
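The repeated RegionTooBusyException warnings earlier in this stretch record client writes being pushed back while the region's memstore sits above its blocking limit (512.0 K here). That limit is normally the memstore flush size multiplied by the block multiplier, so the small figure presumably reflects a deliberately tiny flush size in this test's configuration. The sketch below is illustrative only; the values shown are the common defaults, not what this run used, and the class name is invented for the example.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstorePressureSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // The per-region blocking threshold behind RegionTooBusyException is
        // derived from these two settings (flush size times block multiplier).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is a retryable exception: the HBase client
          // retries it internally and it only reaches the caller once the
          // configured retry limits are exhausted.
          table.put(put);
        }
      }
    }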
2024-12-09T17:21:57,181 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/1486b69fba1e426d8889d9c811e6a189, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.5 K 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c66e65c8a604909961b1ab9df689d5d, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1486b69fba1e426d8889d9c811e6a189, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95fddf0ed8b444ce83e51182b2b95170, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733764913125 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting cc9e55fd2b214814851c3006c8c7d7c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733764913125 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c23f25d3e244784ae7a8cab503b1c84, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:21:57,182 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 90baace7717c4ea1839ac08d66657f04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:21:57,188 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:57,189 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#181 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:57,190 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/67649c35ccc84bc18c9b15ebf439a1bd is 50, key is test_row_0/B:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:57,190 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120931d420352c3a4989b326c77a3ca56840_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:57,191 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120931d420352c3a4989b326c77a3ca56840_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:57,192 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120931d420352c3a4989b326c77a3ca56840_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:57,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742042_1218 (size=12983) 2024-12-09T17:21:57,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742043_1219 (size=4469) 2024-12-09T17:21:57,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-09T17:21:57,498 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-09T17:21:57,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-09T17:21:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-09T17:21:57,500 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:57,500 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:57,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:57,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-09T17:21:57,611 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/67649c35ccc84bc18c9b15ebf439a1bd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/67649c35ccc84bc18c9b15ebf439a1bd 2024-12-09T17:21:57,613 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#180 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:57,614 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c7e175cf059f481badd57f3804da46a1 is 175, key is test_row_0/A:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:57,617 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 67649c35ccc84bc18c9b15ebf439a1bd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:21:57,617 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:57,617 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764917180; duration=0sec 2024-12-09T17:21:57,617 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:21:57,617 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:21:57,617 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:21:57,618 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:21:57,618 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:21:57,618 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:57,618 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f8de663cafac4f69982b7789c59b6c48, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.5 K 2024-12-09T17:21:57,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742044_1220 (size=31937) 2024-12-09T17:21:57,619 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f8de663cafac4f69982b7789c59b6c48, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733764911980 2024-12-09T17:21:57,620 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d5bfc637c80460a99a0298a500c94ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1733764913125 2024-12-09T17:21:57,620 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ea92ff87579482d87dd39f72a406ea1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:21:57,627 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#182 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:21:57,627 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/82cd24cf2ecd40db97fb27627e80b055 is 50, key is test_row_0/C:col10/1733764915866/Put/seqid=0 2024-12-09T17:21:57,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742045_1221 (size=12983) 2024-12-09T17:21:57,635 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/82cd24cf2ecd40db97fb27627e80b055 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/82cd24cf2ecd40db97fb27627e80b055 2024-12-09T17:21:57,640 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 82cd24cf2ecd40db97fb27627e80b055(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:57,640 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:57,640 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764917180; duration=0sec 2024-12-09T17:21:57,640 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:57,640 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:21:57,652 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:57,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-09T17:21:57,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:21:57,652 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:21:57,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:57,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:57,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:57,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:57,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:57,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:57,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090ef97170bb814f41b8d4fbef90939365_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764915897/Put/seqid=0 2024-12-09T17:21:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742046_1222 (size=12454) 2024-12-09T17:21:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-09T17:21:58,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:58,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:58,024 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/c7e175cf059f481badd57f3804da46a1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1 2024-12-09T17:21:58,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764978030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764978031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,037 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into c7e175cf059f481badd57f3804da46a1(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:21:58,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:58,037 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764917179; duration=0sec 2024-12-09T17:21:58,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:21:58,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:21:58,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:58,080 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412090ef97170bb814f41b8d4fbef90939365_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090ef97170bb814f41b8d4fbef90939365_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:58,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/9263e8278acf4664b34aa2d5101ccdb3, store: [table=TestAcidGuarantees 
family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:58,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/9263e8278acf4664b34aa2d5101ccdb3 is 175, key is test_row_0/A:col10/1733764915897/Put/seqid=0 2024-12-09T17:21:58,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742047_1223 (size=31255) 2024-12-09T17:21:58,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-09T17:21:58,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764978133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764978133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764978335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764978335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,486 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/9263e8278acf4664b34aa2d5101ccdb3 2024-12-09T17:21:58,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/aeed7779ec814424baf14ff3a578276d is 50, key is test_row_0/B:col10/1733764915897/Put/seqid=0 2024-12-09T17:21:58,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742048_1224 (size=12301) 2024-12-09T17:21:58,512 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/aeed7779ec814424baf14ff3a578276d 2024-12-09T17:21:58,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/90c5f94dd0f74950be6719eaa439eb2e is 50, key is test_row_0/C:col10/1733764915897/Put/seqid=0 2024-12-09T17:21:58,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742049_1225 (size=12301) 2024-12-09T17:21:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=63 2024-12-09T17:21:58,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764978639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764978640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:58,930 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/90c5f94dd0f74950be6719eaa439eb2e 2024-12-09T17:21:58,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/9263e8278acf4664b34aa2d5101ccdb3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3 2024-12-09T17:21:58,940 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3, entries=150, sequenceid=318, filesize=30.5 K 2024-12-09T17:21:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/aeed7779ec814424baf14ff3a578276d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d 2024-12-09T17:21:58,946 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d, entries=150, sequenceid=318, filesize=12.0 K 2024-12-09T17:21:58,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/90c5f94dd0f74950be6719eaa439eb2e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e 2024-12-09T17:21:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e, entries=150, sequenceid=318, filesize=12.0 K 2024-12-09T17:21:58,952 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1300ms, sequenceid=318, compaction requested=false 2024-12-09T17:21:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:21:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:58,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-09T17:21:58,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-09T17:21:58,954 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-09T17:21:58,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4520 sec 2024-12-09T17:21:58,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.4720 sec 2024-12-09T17:21:59,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:59,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:21:59,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:21:59,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209500907b548b7485a8f7738c68d3d1e1e_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764918029/Put/seqid=0 2024-12-09T17:21:59,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742050_1226 (size=14994) 2024-12-09T17:21:59,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764979163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764979164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764979265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764979266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764979469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764979469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,555 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:21:59,558 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209500907b548b7485a8f7738c68d3d1e1e_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209500907b548b7485a8f7738c68d3d1e1e_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:21:59,559 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b7e4f2e6dfe14104b2bb02176e9cfef9, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:21:59,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b7e4f2e6dfe14104b2bb02176e9cfef9 is 175, key is test_row_0/A:col10/1733764918029/Put/seqid=0 2024-12-09T17:21:59,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742051_1227 (size=39949) 2024-12-09T17:21:59,578 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, 
memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b7e4f2e6dfe14104b2bb02176e9cfef9 2024-12-09T17:21:59,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/219801cf496d478896a7adfc2e087d2d is 50, key is test_row_0/B:col10/1733764918029/Put/seqid=0 2024-12-09T17:21:59,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742052_1228 (size=12301) 2024-12-09T17:21:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-09T17:21:59,603 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-09T17:21:59,605 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:21:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-09T17:21:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:21:59,606 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:21:59,607 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:21:59,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:21:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:21:59,759 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:21:59,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:59,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
as already flushing 2024-12-09T17:21:59,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:59,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:59,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764979772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:21:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764979773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:21:59,911 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:21:59,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:21:59,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:59,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:21:59,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:21:59,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:21:59,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:21:59,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/219801cf496d478896a7adfc2e087d2d 2024-12-09T17:21:59,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/118b05af3cff414dbdf90098df1db262 is 50, key is test_row_0/C:col10/1733764918029/Put/seqid=0 2024-12-09T17:21:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742053_1229 (size=12301) 2024-12-09T17:22:00,064 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:22:00,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:00,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:00,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:22:00,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:22:00,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:00,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764980278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764980279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,369 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:22:00,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:00,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,369 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:00,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:00,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/118b05af3cff414dbdf90098df1db262 2024-12-09T17:22:00,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/b7e4f2e6dfe14104b2bb02176e9cfef9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9 2024-12-09T17:22:00,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9, entries=200, sequenceid=335, filesize=39.0 K 2024-12-09T17:22:00,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/219801cf496d478896a7adfc2e087d2d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d 2024-12-09T17:22:00,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d, entries=150, sequenceid=335, filesize=12.0 K 2024-12-09T17:22:00,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/118b05af3cff414dbdf90098df1db262 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262 2024-12-09T17:22:00,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262, entries=150, sequenceid=335, filesize=12.0 K 2024-12-09T17:22:00,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1274ms, sequenceid=335, compaction requested=true 2024-12-09T17:22:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:00,419 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:00,419 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:00,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:22:00,420 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:00,420 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=100.7 K 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:22:00,420 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,420 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9] 2024-12-09T17:22:00,420 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/67649c35ccc84bc18c9b15ebf439a1bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.7 K 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7e175cf059f481badd57f3804da46a1, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:22:00,420 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67649c35ccc84bc18c9b15ebf439a1bd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:22:00,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9263e8278acf4664b34aa2d5101ccdb3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733764915892 2024-12-09T17:22:00,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting aeed7779ec814424baf14ff3a578276d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733764915892 2024-12-09T17:22:00,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7e4f2e6dfe14104b2bb02176e9cfef9, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:00,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 219801cf496d478896a7adfc2e087d2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:00,431 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:00,431 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:00,431 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/87affbf52ba944bcaa5bf8b023f5afea is 50, key is test_row_0/B:col10/1733764918029/Put/seqid=0 2024-12-09T17:22:00,444 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412093e7dd5f224014321ae298b5d432189e5_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:00,445 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412093e7dd5f224014321ae298b5d432189e5_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:00,445 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412093e7dd5f224014321ae298b5d432189e5_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:00,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742054_1230 (size=13085) 2024-12-09T17:22:00,462 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/87affbf52ba944bcaa5bf8b023f5afea as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/87affbf52ba944bcaa5bf8b023f5afea 2024-12-09T17:22:00,467 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into 87affbf52ba944bcaa5bf8b023f5afea(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:00,467 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:00,467 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764920419; duration=0sec 2024-12-09T17:22:00,467 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:00,467 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:22:00,467 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:00,471 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:00,471 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:22:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742055_1231 (size=4469) 2024-12-09T17:22:00,471 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:22:00,472 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/82cd24cf2ecd40db97fb27627e80b055, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.7 K 2024-12-09T17:22:00,472 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 82cd24cf2ecd40db97fb27627e80b055, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733764915252 2024-12-09T17:22:00,472 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 90c5f94dd0f74950be6719eaa439eb2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733764915892 2024-12-09T17:22:00,473 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 118b05af3cff414dbdf90098df1db262, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:00,479 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#191 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:00,480 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/167e44c78fef49d1ab60507a6919ca0a is 50, key is test_row_0/C:col10/1733764918029/Put/seqid=0 2024-12-09T17:22:00,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742056_1232 (size=13085) 2024-12-09T17:22:00,521 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:00,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:22:00,522 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:22:00,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:00,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cd797cc1207443238734316d5543f219_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764919162/Put/seqid=0 2024-12-09T17:22:00,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742057_1233 (size=12454) 2024-12-09T17:22:00,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:22:00,873 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#190 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:00,873 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/d1ef0072f4464ce999367b3158ce7004 is 175, key is test_row_0/A:col10/1733764918029/Put/seqid=0 2024-12-09T17:22:00,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742058_1234 (size=32039) 2024-12-09T17:22:00,884 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/d1ef0072f4464ce999367b3158ce7004 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004 2024-12-09T17:22:00,889 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into d1ef0072f4464ce999367b3158ce7004(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:00,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:00,890 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764920418; duration=0sec 2024-12-09T17:22:00,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:00,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:22:00,894 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/167e44c78fef49d1ab60507a6919ca0a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/167e44c78fef49d1ab60507a6919ca0a 2024-12-09T17:22:00,899 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 167e44c78fef49d1ab60507a6919ca0a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:00,899 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:00,899 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764920419; duration=0sec 2024-12-09T17:22:00,899 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:00,899 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:22:00,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:00,936 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cd797cc1207443238734316d5543f219_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cd797cc1207443238734316d5543f219_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/19d9cffa89cd4ffcbcd1c8baedddd1cc, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/19d9cffa89cd4ffcbcd1c8baedddd1cc is 175, key is test_row_0/A:col10/1733764919162/Put/seqid=0 2024-12-09T17:22:00,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742059_1235 (size=31255) 2024-12-09T17:22:00,944 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=356, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/19d9cffa89cd4ffcbcd1c8baedddd1cc 2024-12-09T17:22:00,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/40237b2980094b4f8401e0d040301d59 is 50, key is test_row_0/B:col10/1733764919162/Put/seqid=0 2024-12-09T17:22:00,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742060_1236 (size=12301) 2024-12-09T17:22:00,959 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/40237b2980094b4f8401e0d040301d59 2024-12-09T17:22:00,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 is 50, key is test_row_0/C:col10/1733764919162/Put/seqid=0 2024-12-09T17:22:00,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742061_1237 (size=12301) 2024-12-09T17:22:00,987 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 2024-12-09T17:22:00,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/19d9cffa89cd4ffcbcd1c8baedddd1cc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc 2024-12-09T17:22:00,999 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc, entries=150, sequenceid=356, filesize=30.5 K 2024-12-09T17:22:01,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/40237b2980094b4f8401e0d040301d59 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59 2024-12-09T17:22:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,003 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59, entries=150, sequenceid=356, filesize=12.0 K 2024-12-09T17:22:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-09T17:22:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,012 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1, entries=150, sequenceid=356, filesize=12.0 K 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,014 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for 20e312d1737c5c0e923e8e7c9efe02a2 in 492ms, sequenceid=356, compaction requested=false 2024-12-09T17:22:01,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:01,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
2024-12-09T17:22:01,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-09T17:22:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-09T17:22:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-09T17:22:01,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4090 sec 2024-12-09T17:22:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.4130 sec 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — is logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=42927) between 2024-12-09T17:22:01,252 and 2024-12-09T17:22:01,308 ...]
2024-12-09T17:22:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2
2024-12-09T17:22:01,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C
2024-12-09T17:22:01,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:22:01,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f168941467a6403e937c805cb2e62074_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764921345/Put/seqid=0
2024-12-09T17:22:01,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742062_1238 (size=14994)
2024-12-09T17:22:01,391 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:22:01,396 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f168941467a6403e937c805cb2e62074_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f168941467a6403e937c805cb2e62074_20e312d1737c5c0e923e8e7c9efe02a2
2024-12-09T17:22:01,398 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f54cb1f5c1164db98dc3d3bc0333b5c3, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2]
2024-12-09T17:22:01,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f54cb1f5c1164db98dc3d3bc0333b5c3 is 175, key is test_row_0/A:col10/1733764921345/Put/seqid=0
2024-12-09T17:22:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:22:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764981397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:22:01,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:22:01,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764981401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:22:01,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742063_1239 (size=39949)
2024-12-09T17:22:01,428 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f54cb1f5c1164db98dc3d3bc0333b5c3
2024-12-09T17:22:01,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/4eca8b003dac4c049fd9d4d365789394 is 50, key is test_row_0/B:col10/1733764921345/Put/seqid=0
2024-12-09T17:22:01,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742064_1240 (size=12301)
2024-12-09T17:22:01,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:01,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764981501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764981505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:01,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:01,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41198 deadline: 1733764981704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:01,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:01,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41178 deadline: 1733764981707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-09T17:22:01,709 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-09T17:22:01,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:01,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-09T17:22:01,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:01,713 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:01,713 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:01,713 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:01,739 DEBUG [Thread-683 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d5e0e3f to 127.0.0.1:54326 2024-12-09T17:22:01,739 DEBUG [Thread-685 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70f48df4 to 127.0.0.1:54326 2024-12-09T17:22:01,739 DEBUG [Thread-683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:01,739 DEBUG [Thread-685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:01,740 DEBUG [Thread-681 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x706b2cde to 127.0.0.1:54326 2024-12-09T17:22:01,740 DEBUG [Thread-681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:01,741 DEBUG [Thread-687 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cd5be36 to 127.0.0.1:54326 2024-12-09T17:22:01,741 DEBUG [Thread-687 {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-12-09T17:22:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:01,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:01,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/4eca8b003dac4c049fd9d4d365789394 2024-12-09T17:22:01,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:01,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:01,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:01,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
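The RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (reported as 512.0 K here) until the in-flight flush drains it. The stock client already treats this as a retryable exception; purely as an illustration of what that handling looks like, here is a minimal explicit sketch (putWithBackoff is a hypothetical helper name, not part of the HBase API).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionExample {
  // Retry a put a few times when the region reports it is over its memstore
  // limit, sleeping between attempts so the flush can catch up.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);                 // may fail with RegionTooBusyException while the region is blocked
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                      // give up and surface the exception to the caller
        }
        Thread.sleep(100L * attempt);   // simple linear backoff
      }
    }
  }
}
```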
2024-12-09T17:22:01,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f25b0278d2524505905f9ae2c65f68d6 is 50, key is test_row_0/C:col10/1733764921345/Put/seqid=0 2024-12-09T17:22:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742065_1241 (size=12301) 2024-12-09T17:22:01,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f25b0278d2524505905f9ae2c65f68d6 2024-12-09T17:22:01,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/f54cb1f5c1164db98dc3d3bc0333b5c3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3 2024-12-09T17:22:01,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3, entries=200, sequenceid=369, filesize=39.0 K 2024-12-09T17:22:01,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/4eca8b003dac4c049fd9d4d365789394 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394 2024-12-09T17:22:01,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394, entries=150, sequenceid=369, filesize=12.0 K 2024-12-09T17:22:01,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f25b0278d2524505905f9ae2c65f68d6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6 2024-12-09T17:22:01,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6, entries=150, sequenceid=369, filesize=12.0 K 2024-12-09T17:22:01,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 20e312d1737c5c0e923e8e7c9efe02a2 in 542ms, sequenceid=369, 
compaction requested=true 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:01,893 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e312d1737c5c0e923e8e7c9efe02a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:01,893 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:01,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/A is initiating minor compaction (all files) 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/B is initiating minor compaction (all files) 2024-12-09T17:22:01,894 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/A in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:01,894 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/B in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
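The selection records above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") compare the store-file count against the usual compaction thresholds. A small sketch of the corresponding configuration keys follows; the values shown are the stock defaults, which happen to match the numbers in this log, and are not read from the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobsExample {
  static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // at least 3 eligible files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);      // at most 10 files are compacted at once
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // updates block once a store reaches 16 files
    return conf;
  }
}
```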
2024-12-09T17:22:01,894 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=100.8 K 2024-12-09T17:22:01,894 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/87affbf52ba944bcaa5bf8b023f5afea, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.8 K 2024-12-09T17:22:01,894 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3] 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 87affbf52ba944bcaa5bf8b023f5afea, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:01,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1ef0072f4464ce999367b3158ce7004, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:01,895 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 40237b2980094b4f8401e0d040301d59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733764919162 2024-12-09T17:22:01,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19d9cffa89cd4ffcbcd1c8baedddd1cc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733764919162 2024-12-09T17:22:01,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f54cb1f5c1164db98dc3d3bc0333b5c3, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733764921344 2024-12-09T17:22:01,895 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4eca8b003dac4c049fd9d4d365789394, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733764921344 2024-12-09T17:22:01,903 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#B#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:01,904 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/a80228e582e44407ac97a881336ab8f7 is 50, key is test_row_0/B:col10/1733764921345/Put/seqid=0 2024-12-09T17:22:01,906 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:01,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742066_1242 (size=13187) 2024-12-09T17:22:01,921 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209c4c4d05ed8f94ce0861ac0261818d3ec_20e312d1737c5c0e923e8e7c9efe02a2 store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:01,942 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209c4c4d05ed8f94ce0861ac0261818d3ec_20e312d1737c5c0e923e8e7c9efe02a2, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:01,942 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c4c4d05ed8f94ce0861ac0261818d3ec_20e312d1737c5c0e923e8e7c9efe02a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:01,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742067_1243 (size=4469) 2024-12-09T17:22:02,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:02,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-09T17:22:02,012 DEBUG [Thread-672 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d006bed to 127.0.0.1:54326 2024-12-09T17:22:02,012 DEBUG [Thread-672 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:02,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:02,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:22:02,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:22:02,018 DEBUG [Thread-674 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b8e1501 to 127.0.0.1:54326 2024-12-09T17:22:02,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,018 DEBUG [Thread-674 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:22:02,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:02,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cea2d0ad251f4be5ba8b01e0b2acc030_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764921394/Put/seqid=0 2024-12-09T17:22:02,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742068_1244 (size=12454) 2024-12-09T17:22:02,037 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:02,042 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cea2d0ad251f4be5ba8b01e0b2acc030_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cea2d0ad251f4be5ba8b01e0b2acc030_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:02,043 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/7690e76990a34d80ba3d98d1f345c869, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:02,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/7690e76990a34d80ba3d98d1f345c869 is 175, key is test_row_0/A:col10/1733764921394/Put/seqid=0 2024-12-09T17:22:02,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742069_1245 (size=31255) 2024-12-09T17:22:02,053 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=396, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/7690e76990a34d80ba3d98d1f345c869 2024-12-09T17:22:02,055 DEBUG [Thread-670 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7435a904 to 127.0.0.1:54326 2024-12-09T17:22:02,055 DEBUG [Thread-670 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:02,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf4bc6a2ebcc464690274ebcdf34c369 is 50, key is test_row_0/B:col10/1733764921394/Put/seqid=0 2024-12-09T17:22:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742070_1246 (size=12301) 2024-12-09T17:22:02,108 DEBUG [Thread-676 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70b41629 to 127.0.0.1:54326 2024-12-09T17:22:02,108 DEBUG [Thread-676 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:02,129 DEBUG [Thread-678 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a0312cf to 127.0.0.1:54326 2024-12-09T17:22:02,129 DEBUG [Thread-678 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:02,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:02,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:02,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:02,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:02,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:02,318 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/a80228e582e44407ac97a881336ab8f7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a80228e582e44407ac97a881336ab8f7 2024-12-09T17:22:02,322 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/B of 20e312d1737c5c0e923e8e7c9efe02a2 into a80228e582e44407ac97a881336ab8f7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:02,322 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:02,323 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/B, priority=13, startTime=1733764921893; duration=0sec 2024-12-09T17:22:02,323 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:02,323 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:B 2024-12-09T17:22:02,323 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:02,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:02,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:02,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:02,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
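The pid=67/68 records above (FlushTableProcedure, FlushRegionCallable, and the repeated "Checking to see if procedure is done") are the server side of a client-initiated table flush; the callable keeps failing with "already flushing", so the master re-dispatches it until the region's own flush finishes. On the client side such a flush is a single Admin call; a minimal sketch, assuming the standard connection setup:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the request is
      // tracked as a flush procedure like pid=67 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```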
2024-12-09T17:22:02,324 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:02,324 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 20e312d1737c5c0e923e8e7c9efe02a2/C is initiating minor compaction (all files) 2024-12-09T17:22:02,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,324 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 20e312d1737c5c0e923e8e7c9efe02a2/C in TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:02,324 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/167e44c78fef49d1ab60507a6919ca0a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp, totalSize=36.8 K 2024-12-09T17:22:02,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
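The repeated pid=68 errors above ("Unable to complete flush ... as already flushing") are the remote FlushRegionCallable bailing out because MemStoreFlusher.0 is still in the middle of its own flush of the region; the master records each failure and re-dispatches the procedure, which finally succeeds further down once the in-flight flush completes. The guard amounts to an "already flushing" flag on the region. A toy illustration with an AtomicBoolean, not HBase's actual region write state:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

public class FlushGuard {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  /** A second caller fails fast instead of stacking a concurrent flush. */
  void flush() throws IOException {
    if (!flushing.compareAndSet(false, true)) {
      throw new IOException("Unable to complete flush: already flushing");
    }
    try {
      // ... write the memstore snapshot out to store files ...
    } finally {
      flushing.set(false);
    }
  }
}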
2024-12-09T17:22:02,325 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 167e44c78fef49d1ab60507a6919ca0a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1733764918024 2024-12-09T17:22:02,325 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b5fdc7f7432c47a5be0cce99ca9dfdd1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1733764919162 2024-12-09T17:22:02,325 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f25b0278d2524505905f9ae2c65f68d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733764921344 2024-12-09T17:22:02,334 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#C#compaction#202 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:02,335 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/229e27f5bee1476a900360cfd3fe44dd is 50, key is test_row_0/C:col10/1733764921345/Put/seqid=0 2024-12-09T17:22:02,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742071_1247 (size=13187) 2024-12-09T17:22:02,348 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e312d1737c5c0e923e8e7c9efe02a2#A#compaction#199 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:02,348 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/db3aa9b059014fcfa32255f4396d15df is 175, key is test_row_0/A:col10/1733764921345/Put/seqid=0 2024-12-09T17:22:02,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742072_1248 (size=32141) 2024-12-09T17:22:02,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf4bc6a2ebcc464690274ebcdf34c369 2024-12-09T17:22:02,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d81f31f93a4d40208a32834636c0d98e is 50, key is test_row_0/C:col10/1733764921394/Put/seqid=0 2024-12-09T17:22:02,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:02,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. as already flushing 2024-12-09T17:22:02,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742073_1249 (size=12301) 2024-12-09T17:22:02,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:02,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d81f31f93a4d40208a32834636c0d98e 2024-12-09T17:22:02,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/7690e76990a34d80ba3d98d1f345c869 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/7690e76990a34d80ba3d98d1f345c869 2024-12-09T17:22:02,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/7690e76990a34d80ba3d98d1f345c869, entries=150, sequenceid=396, filesize=30.5 K 2024-12-09T17:22:02,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/cf4bc6a2ebcc464690274ebcdf34c369 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf4bc6a2ebcc464690274ebcdf34c369 2024-12-09T17:22:02,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf4bc6a2ebcc464690274ebcdf34c369, entries=150, sequenceid=396, filesize=12.0 K 2024-12-09T17:22:02,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/d81f31f93a4d40208a32834636c0d98e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d81f31f93a4d40208a32834636c0d98e 2024-12-09T17:22:02,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d81f31f93a4d40208a32834636c0d98e, entries=150, sequenceid=396, filesize=12.0 K 2024-12-09T17:22:02,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=20.13 KB/20610 for 20e312d1737c5c0e923e8e7c9efe02a2 in 482ms, sequenceid=396, compaction requested=false 2024-12-09T17:22:02,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:02,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:02,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-09T17:22:02,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:02,629 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 20e312d1737c5c0e923e8e7c9efe02a2 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-09T17:22:02,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=A 2024-12-09T17:22:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=B 2024-12-09T17:22:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 20e312d1737c5c0e923e8e7c9efe02a2, store=C 2024-12-09T17:22:02,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:02,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412098ba8b3501a0e4afc85093ac222998d6f_20e312d1737c5c0e923e8e7c9efe02a2 is 50, key is test_row_0/A:col10/1733764922051/Put/seqid=0 2024-12-09T17:22:02,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742074_1250 (size=9914) 2024-12-09T17:22:02,745 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/229e27f5bee1476a900360cfd3fe44dd as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/229e27f5bee1476a900360cfd3fe44dd 2024-12-09T17:22:02,751 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/C of 20e312d1737c5c0e923e8e7c9efe02a2 into 229e27f5bee1476a900360cfd3fe44dd(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:02,751 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:02,751 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/C, priority=13, startTime=1733764921893; duration=0sec 2024-12-09T17:22:02,751 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:02,751 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:C 2024-12-09T17:22:02,756 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/db3aa9b059014fcfa32255f4396d15df as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/db3aa9b059014fcfa32255f4396d15df 2024-12-09T17:22:02,760 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 20e312d1737c5c0e923e8e7c9efe02a2/A of 20e312d1737c5c0e923e8e7c9efe02a2 into db3aa9b059014fcfa32255f4396d15df(size=31.4 K), total size for store is 61.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
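The PressureAwareThroughputController lines earlier in this run ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") come from a rate limiter that sleeps the compaction writer whenever it gets ahead of the configured limit, which is why "slept 0 time(s)" appears here: these small compactions never reach 50 MB/s. A bare-bones sketch of that throttling idea, with invented names and fields rather than the HBase controller itself:

public class SimpleThroughputLimiter {
  private final double limitBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long bytesSoFar = 0;

  public SimpleThroughputLimiter(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  /** Call after writing `bytes`; sleeps just long enough to stay at or under the limit. */
  public synchronized void control(long bytes) throws InterruptedException {
    bytesSoFar += bytes;
    double minSeconds = bytesSoFar / limitBytesPerSec;            // how long the writes should take
    double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9; // how long they actually took
    if (elapsedSeconds < minSeconds) {
      Thread.sleep((long) ((minSeconds - elapsedSeconds) * 1000)); // pay back the difference
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50.0 * 1024 * 1024); // 50 MB/s
    for (int i = 0; i < 10; i++) {
      limiter.control(1024 * 1024); // pretend we just wrote a 1 MB block of compaction output
    }
  }
}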
2024-12-09T17:22:02,760 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:02,760 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2., storeName=20e312d1737c5c0e923e8e7c9efe02a2/A, priority=13, startTime=1733764921893; duration=0sec 2024-12-09T17:22:02,760 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:02,760 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e312d1737c5c0e923e8e7c9efe02a2:A 2024-12-09T17:22:02,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:03,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:03,049 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412098ba8b3501a0e4afc85093ac222998d6f_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412098ba8b3501a0e4afc85093ac222998d6f_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:03,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/99166e2787464241bc3b26296e1fe25a, store: [table=TestAcidGuarantees family=A region=20e312d1737c5c0e923e8e7c9efe02a2] 2024-12-09T17:22:03,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/99166e2787464241bc3b26296e1fe25a is 175, key is test_row_0/A:col10/1733764922051/Put/seqid=0 2024-12-09T17:22:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742075_1251 (size=22561) 2024-12-09T17:22:03,460 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=403, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/99166e2787464241bc3b26296e1fe25a 2024-12-09T17:22:03,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 
{event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/00f73be107d74c9b89bb6f2dc6c2e69e is 50, key is test_row_0/B:col10/1733764922051/Put/seqid=0 2024-12-09T17:22:03,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742076_1252 (size=9857) 2024-12-09T17:22:03,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:03,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T17:22:03,880 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/00f73be107d74c9b89bb6f2dc6c2e69e 2024-12-09T17:22:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 is 50, key is test_row_0/C:col10/1733764922051/Put/seqid=0 2024-12-09T17:22:03,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742077_1253 (size=9857) 2024-12-09T17:22:04,296 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 2024-12-09T17:22:04,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/A/99166e2787464241bc3b26296e1fe25a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/99166e2787464241bc3b26296e1fe25a 2024-12-09T17:22:04,306 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/99166e2787464241bc3b26296e1fe25a, entries=100, sequenceid=403, filesize=22.0 K 2024-12-09T17:22:04,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/B/00f73be107d74c9b89bb6f2dc6c2e69e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/00f73be107d74c9b89bb6f2dc6c2e69e 2024-12-09T17:22:04,311 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/00f73be107d74c9b89bb6f2dc6c2e69e, entries=100, sequenceid=403, filesize=9.6 K 2024-12-09T17:22:04,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/.tmp/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 2024-12-09T17:22:04,315 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f3804dbcaaac45f0a31f9a11e5a5fbe9, entries=100, sequenceid=403, filesize=9.6 K 2024-12-09T17:22:04,316 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 20e312d1737c5c0e923e8e7c9efe02a2 in 1687ms, sequenceid=403, compaction requested=true 2024-12-09T17:22:04,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:04,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
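With all three families (A, B, C) committed at sequenceid=403, pid=68 can report success and its parent FlushTableProcedure (pid=67) finishes, which is what the repeated "Checking to see if procedure is done pid=67" polling has been waiting for. From the client side this whole exchange is a synchronous table flush; assuming an HBase 2.x client on the classpath and a reachable cluster, the triggering call looks roughly like:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master and waits for it to complete.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The call returns once the master reports the procedure done, matching the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed" line below.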
2024-12-09T17:22:04,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-09T17:22:04,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-09T17:22:04,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-09T17:22:04,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6040 sec 2024-12-09T17:22:04,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.6090 sec 2024-12-09T17:22:05,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-09T17:22:05,820 INFO [Thread-680 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 164 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 23 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8466 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8074 2024-12-09T17:22:05,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-09T17:22:05,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3455 2024-12-09T17:22:05,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10364 rows 2024-12-09T17:22:05,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3439 2024-12-09T17:22:05,821 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10317 rows 2024-12-09T17:22:05,821 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T17:22:05,821 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a8da76b to 127.0.0.1:54326 2024-12-09T17:22:05,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:05,826 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-09T17:22:05,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-09T17:22:05,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:05,830 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-09T17:22:05,831 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764925831"}]},"ts":"1733764925831"} 2024-12-09T17:22:05,832 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-09T17:22:05,875 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-09T17:22:05,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:22:05,878 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, UNASSIGN}] 2024-12-09T17:22:05,880 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, UNASSIGN 2024-12-09T17:22:05,881 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:05,882 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:22:05,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; CloseRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-09T17:22:06,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:06,036 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] handler.UnassignRegionHandler(124): Close 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,036 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:22:06,036 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1681): Closing 20e312d1737c5c0e923e8e7c9efe02a2, disabling compactions & flushes 2024-12-09T17:22:06,036 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:06,036 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 
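The DisableTableProcedure (pid=69) above fans out into CloseTableRegionsProcedure (70), TransitRegionStateProcedure (71), and CloseRegionProcedure (72), ending with the region server closing 20e312d1737c5c0e923e8e7c9efe02a2. On the test side this is just the synchronous disable that precedes dropping the table during teardown; a minimal sketch using the same client API as the flush example above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.disableTable(table);   // waits for the disable procedure to finish
      // admin.deleteTable(table); // a disabled table can then be dropped
    }
  }
}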
2024-12-09T17:22:06,036 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. after waiting 0 ms 2024-12-09T17:22:06,036 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:06,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3] to archive 2024-12-09T17:22:06,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
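The HFileArchiver entries that follow relocate each obsolete store file from data/default/TestAcidGuarantees/<region>/A/ to the matching path under archive/ rather than deleting it outright, so snapshots and cleanup chores can still reach the files. A small standalone sketch of that "mirror the path under an archive root" move, written with java.nio.file and invented local paths rather than HDFS:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class ArchiveStoreFile {
  /** Move a store file under archiveRoot, mirroring its path relative to dataRoot. */
  static void archive(Path dataRoot, Path archiveRoot, Path storeFile) throws IOException {
    Path relative = dataRoot.relativize(storeFile); // e.g. default/TestTable/region1/A/<hfile>
    Path target = archiveRoot.resolve(relative);
    Files.createDirectories(target.getParent());
    Files.move(storeFile, target, StandardCopyOption.REPLACE_EXISTING);
  }

  public static void main(String[] args) throws IOException {
    Path dataRoot = Paths.get("data");
    Path familyDir = Files.createDirectories(
        dataRoot.resolve(Paths.get("default", "TestTable", "region1", "A")));
    Path storeFile = Files.write(familyDir.resolve("c666380445de45c89cf2b0f025761d47"), new byte[0]);
    archive(dataRoot, Paths.get("archive"), storeFile);
  }
}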
2024-12-09T17:22:06,045 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c666380445de45c89cf2b0f025761d47 2024-12-09T17:22:06,047 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/df80a7069ab3496aabe3c362c1e94a96 2024-12-09T17:22:06,050 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/8518b7abc64b4127ab1175cf7248a7a6 2024-12-09T17:22:06,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/864bae374bff47ffb32d73f3c0eab616 2024-12-09T17:22:06,055 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c52875f7ed184bfeb42e535f57a849ce 2024-12-09T17:22:06,058 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/3850cac7da474afba7e6d2dab5d4000e 2024-12-09T17:22:06,061 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f58cc3e0982f4856912375cd5d891a84 2024-12-09T17:22:06,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b53a8762f48343179f29ed264e52f1d5 2024-12-09T17:22:06,066 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/27ba081e176e48ebaf3a3ab146f8380a 2024-12-09T17:22:06,069 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/829bdefcf75c403f933dd440f1c3e80f 2024-12-09T17:22:06,071 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6a0b0bca6c6e444cb361e27f773b496e 2024-12-09T17:22:06,073 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/4b2b927bb98d423498d0d87f2909e08c 2024-12-09T17:22:06,075 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/30c126bc23ef4ffb8e5257efe7c0dfd7 2024-12-09T17:22:06,078 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1b1ecd53734044768ec8a011e4154f4e 2024-12-09T17:22:06,080 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/fcd5aa82a91f48f683d957a1ea9c4308 2024-12-09T17:22:06,082 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/719723c60ad644ada36f23d691f5191f 2024-12-09T17:22:06,084 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/6c66e65c8a604909961b1ab9df689d5d 2024-12-09T17:22:06,086 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/44b35cb71ec14770be33675f730c4199 2024-12-09T17:22:06,088 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/95fddf0ed8b444ce83e51182b2b95170 2024-12-09T17:22:06,089 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/c7e175cf059f481badd57f3804da46a1 2024-12-09T17:22:06,091 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/1c23f25d3e244784ae7a8cab503b1c84 2024-12-09T17:22:06,093 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/9263e8278acf4664b34aa2d5101ccdb3 2024-12-09T17:22:06,095 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/b7e4f2e6dfe14104b2bb02176e9cfef9 2024-12-09T17:22:06,097 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/d1ef0072f4464ce999367b3158ce7004 2024-12-09T17:22:06,099 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/19d9cffa89cd4ffcbcd1c8baedddd1cc 2024-12-09T17:22:06,101 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/f54cb1f5c1164db98dc3d3bc0333b5c3 2024-12-09T17:22:06,103 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/48be931eb1154851b877f6503baefb13, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/72eac906d76d4278a2d5953c7a957e5c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/dd297821cdde4474bd0355499adcdbb3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/561ff30311ad4a37b9a5c01909c8273a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/1486b69fba1e426d8889d9c811e6a189, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/67649c35ccc84bc18c9b15ebf439a1bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/87affbf52ba944bcaa5bf8b023f5afea, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394] to archive 2024-12-09T17:22:06,104 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:22:06,106 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf95aac0ad31444e849776e6f9d6cd75 2024-12-09T17:22:06,108 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0a5fadb73e234b5f9976e5100275cfaf 2024-12-09T17:22:06,109 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/48be931eb1154851b877f6503baefb13 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/48be931eb1154851b877f6503baefb13 2024-12-09T17:22:06,110 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/518b36c7c7eb4e32b9fea3667f569093 2024-12-09T17:22:06,110 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/f8fe7322be0b40cab9492682bfc097a2 2024-12-09T17:22:06,111 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/37a5327b354b488e81463abe560caae2 2024-12-09T17:22:06,112 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/72eac906d76d4278a2d5953c7a957e5c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/72eac906d76d4278a2d5953c7a957e5c 2024-12-09T17:22:06,113 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/192fdbbb539f4249b52014632e2d1bc4 2024-12-09T17:22:06,114 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/82dc7272891b4b2798836e5aea9a735f 2024-12-09T17:22:06,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/dd297821cdde4474bd0355499adcdbb3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/dd297821cdde4474bd0355499adcdbb3 2024-12-09T17:22:06,115 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/9ca1d83617f44f95aa460ff762bc3dbd 2024-12-09T17:22:06,116 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a9369409e70e40fe88bcbca7b22a34ef 2024-12-09T17:22:06,117 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/363cd6e5e34141e0ab33166d898e9165 2024-12-09T17:22:06,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/561ff30311ad4a37b9a5c01909c8273a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/561ff30311ad4a37b9a5c01909c8273a 2024-12-09T17:22:06,118 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/3d5f3a3f446f400ba955094acd46de6a 2024-12-09T17:22:06,119 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/13d2b035f6b34d169b71adbbec9a9a00 2024-12-09T17:22:06,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/1486b69fba1e426d8889d9c811e6a189 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/1486b69fba1e426d8889d9c811e6a189 2024-12-09T17:22:06,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/0c179ae2dddd4311a1b7ba032ffbaf88 2024-12-09T17:22:06,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cc9e55fd2b214814851c3006c8c7d7c2 2024-12-09T17:22:06,122 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/67649c35ccc84bc18c9b15ebf439a1bd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/67649c35ccc84bc18c9b15ebf439a1bd 2024-12-09T17:22:06,122 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/90baace7717c4ea1839ac08d66657f04 2024-12-09T17:22:06,123 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/aeed7779ec814424baf14ff3a578276d 2024-12-09T17:22:06,124 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/87affbf52ba944bcaa5bf8b023f5afea to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/87affbf52ba944bcaa5bf8b023f5afea 2024-12-09T17:22:06,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/219801cf496d478896a7adfc2e087d2d 2024-12-09T17:22:06,125 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/40237b2980094b4f8401e0d040301d59 2024-12-09T17:22:06,126 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/4eca8b003dac4c049fd9d4d365789394 2024-12-09T17:22:06,127 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/3958d990216243479d6ee3a98511b109, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/288caa3f02ae44eda9d882979bf3daa4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6e2c62eba4b44653a505d5a8b2aeca84, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/97737a0e1f5e4175ad8cf15c50fc5c5e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f8de663cafac4f69982b7789c59b6c48, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/82cd24cf2ecd40db97fb27627e80b055, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/167e44c78fef49d1ab60507a6919ca0a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6] to archive 2024-12-09T17:22:06,128 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:22:06,129 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/be985125491f4fffb330c2bf3129be05 2024-12-09T17:22:06,130 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/ac9db662380b4bf390f0a332faeae2cb 2024-12-09T17:22:06,130 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/3958d990216243479d6ee3a98511b109 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/3958d990216243479d6ee3a98511b109 2024-12-09T17:22:06,131 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9145093d25d346efa4eb4b9a29cc5b9e 2024-12-09T17:22:06,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-09T17:22:06,132 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/61d920ca3f9e454ab062ab39688f7f9b 2024-12-09T17:22:06,133 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8bec2c9f721f42f2ba68b21accae8561 2024-12-09T17:22:06,134 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/288caa3f02ae44eda9d882979bf3daa4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/288caa3f02ae44eda9d882979bf3daa4 2024-12-09T17:22:06,134 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/e3c2d994321e4a0faf43c6dcdc6d7301 2024-12-09T17:22:06,135 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d0f76371409249d0ac2060a78971f32d 2024-12-09T17:22:06,136 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6e2c62eba4b44653a505d5a8b2aeca84 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6e2c62eba4b44653a505d5a8b2aeca84 2024-12-09T17:22:06,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/1790990fbedd445c9c30298fded2c5d0 2024-12-09T17:22:06,137 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/7e1bc1334baf4986841cf63f86048e05 2024-12-09T17:22:06,138 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/8424d3b59e024a4f936904348a866bfd 2024-12-09T17:22:06,139 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/97737a0e1f5e4175ad8cf15c50fc5c5e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/97737a0e1f5e4175ad8cf15c50fc5c5e 2024-12-09T17:22:06,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/6234e0f70d3d4d5aa577c46765b6b912 2024-12-09T17:22:06,140 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f9bf452eb3cf4dae8e19a914ae7e9882 2024-12-09T17:22:06,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f8de663cafac4f69982b7789c59b6c48 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f8de663cafac4f69982b7789c59b6c48 2024-12-09T17:22:06,142 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/865a8b8d3b11466fb982b7173d4946ab 2024-12-09T17:22:06,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9d5bfc637c80460a99a0298a500c94ad 2024-12-09T17:22:06,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/82cd24cf2ecd40db97fb27627e80b055 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/82cd24cf2ecd40db97fb27627e80b055 2024-12-09T17:22:06,144 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/9ea92ff87579482d87dd39f72a406ea1 2024-12-09T17:22:06,145 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/90c5f94dd0f74950be6719eaa439eb2e 2024-12-09T17:22:06,146 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/167e44c78fef49d1ab60507a6919ca0a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/167e44c78fef49d1ab60507a6919ca0a 2024-12-09T17:22:06,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/118b05af3cff414dbdf90098df1db262 2024-12-09T17:22:06,147 DEBUG [StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/b5fdc7f7432c47a5be0cce99ca9dfdd1 2024-12-09T17:22:06,148 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f25b0278d2524505905f9ae2c65f68d6 2024-12-09T17:22:06,152 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits/408.seqid, newMaxSeqId=408, maxSeqId=4 2024-12-09T17:22:06,152 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2. 2024-12-09T17:22:06,152 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] regionserver.HRegion(1635): Region close journal for 20e312d1737c5c0e923e8e7c9efe02a2: 2024-12-09T17:22:06,153 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=72}] handler.UnassignRegionHandler(170): Closed 20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,154 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=71 updating hbase:meta row=20e312d1737c5c0e923e8e7c9efe02a2, regionState=CLOSED 2024-12-09T17:22:06,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-09T17:22:06,155 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; CloseRegionProcedure 20e312d1737c5c0e923e8e7c9efe02a2, server=80c69eb3c456,42927,1733764865379 in 272 msec 2024-12-09T17:22:06,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=71, resume processing ppid=70 2024-12-09T17:22:06,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, ppid=70, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=20e312d1737c5c0e923e8e7c9efe02a2, UNASSIGN in 277 msec 2024-12-09T17:22:06,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-09T17:22:06,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 280 msec 2024-12-09T17:22:06,158 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764926157"}]},"ts":"1733764926157"} 2024-12-09T17:22:06,158 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:22:06,167 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:22:06,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 340 msec 2024-12-09T17:22:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=69 2024-12-09T17:22:06,434 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-09T17:22:06,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:22:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,440 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=73, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-09T17:22:06,441 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=73, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,444 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,447 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits] 2024-12-09T17:22:06,451 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/7690e76990a34d80ba3d98d1f345c869 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/7690e76990a34d80ba3d98d1f345c869 2024-12-09T17:22:06,452 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/99166e2787464241bc3b26296e1fe25a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/99166e2787464241bc3b26296e1fe25a 2024-12-09T17:22:06,453 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/db3aa9b059014fcfa32255f4396d15df to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/A/db3aa9b059014fcfa32255f4396d15df 2024-12-09T17:22:06,456 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/00f73be107d74c9b89bb6f2dc6c2e69e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/00f73be107d74c9b89bb6f2dc6c2e69e 2024-12-09T17:22:06,457 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a80228e582e44407ac97a881336ab8f7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/a80228e582e44407ac97a881336ab8f7 2024-12-09T17:22:06,458 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf4bc6a2ebcc464690274ebcdf34c369 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/B/cf4bc6a2ebcc464690274ebcdf34c369 2024-12-09T17:22:06,460 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/229e27f5bee1476a900360cfd3fe44dd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/229e27f5bee1476a900360cfd3fe44dd 2024-12-09T17:22:06,462 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d81f31f93a4d40208a32834636c0d98e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/d81f31f93a4d40208a32834636c0d98e 2024-12-09T17:22:06,463 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/C/f3804dbcaaac45f0a31f9a11e5a5fbe9 2024-12-09T17:22:06,465 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits/408.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2/recovered.edits/408.seqid 
2024-12-09T17:22:06,465 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,465 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:22:06,466 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:22:06,466 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-09T17:22:06,469 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090502809e7181488b81a790b363594a5c_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090502809e7181488b81a790b363594a5c_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,470 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120907834836cba649f28bc5c1516da523c2_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120907834836cba649f28bc5c1516da523c2_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,471 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090b69749fe20148d3b7560ccc23bddf36_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090b69749fe20148d3b7560ccc23bddf36_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,472 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090e3c1ccb4b9a4f4caa06325c22f83ab0_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090e3c1ccb4b9a4f4caa06325c22f83ab0_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,472 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090ef97170bb814f41b8d4fbef90939365_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412090ef97170bb814f41b8d4fbef90939365_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,473 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209102ccd843f4e4a31980f2b7ee2aa46ba_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209102ccd843f4e4a31980f2b7ee2aa46ba_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,474 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120923c9e9df32af43d996e8798b9fc5c357_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120923c9e9df32af43d996e8798b9fc5c357_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,475 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209500907b548b7485a8f7738c68d3d1e1e_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209500907b548b7485a8f7738c68d3d1e1e_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,476 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120950ef158cb6c54345a5d9c6a29934cecd_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120950ef158cb6c54345a5d9c6a29934cecd_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,477 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412098ba8b3501a0e4afc85093ac222998d6f_20e312d1737c5c0e923e8e7c9efe02a2 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412098ba8b3501a0e4afc85093ac222998d6f_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,478 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412099c933828c1444f32889743fe2fd15484_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412099c933828c1444f32889743fe2fd15484_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,479 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209bf25ccea32264229ab1ee3c0ad369c21_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209bf25ccea32264229ab1ee3c0ad369c21_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,479 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c7dbd715f10d4a8e8217b8d7fe34faa2_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c7dbd715f10d4a8e8217b8d7fe34faa2_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,480 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cd797cc1207443238734316d5543f219_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cd797cc1207443238734316d5543f219_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,481 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cea2d0ad251f4be5ba8b01e0b2acc030_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cea2d0ad251f4be5ba8b01e0b2acc030_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,482 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cf8c66971da74fe3b4fff4abf5dbfe87_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cf8c66971da74fe3b4fff4abf5dbfe87_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,483 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209d8af33cf2dab4824b3d472c95d45a8e7_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209d8af33cf2dab4824b3d472c95d45a8e7_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,483 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea223903aa7542cdacb6797493bd1adf_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea223903aa7542cdacb6797493bd1adf_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,484 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea9ae4e57f5343768ec2b714eef64d21_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ea9ae4e57f5343768ec2b714eef64d21_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,485 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f168941467a6403e937c805cb2e62074_20e312d1737c5c0e923e8e7c9efe02a2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f168941467a6403e937c805cb2e62074_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,486 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f753d588a150467386076b8aad6ba0dc_20e312d1737c5c0e923e8e7c9efe02a2 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f753d588a150467386076b8aad6ba0dc_20e312d1737c5c0e923e8e7c9efe02a2 2024-12-09T17:22:06,487 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:22:06,488 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=73, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,490 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:22:06,492 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:22:06,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=73, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-09T17:22:06,493 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733764926493"}]},"ts":"9223372036854775807"} 2024-12-09T17:22:06,495 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:22:06,495 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 20e312d1737c5c0e923e8e7c9efe02a2, NAME => 'TestAcidGuarantees,,1733764898467.20e312d1737c5c0e923e8e7c9efe02a2.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:22:06,495 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
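The entries above show backup.HFileArchiver moving every MOB store file for region 6995762b1b921fda31b7b26a0c5785c3 from the mobdir tree into the parallel path under archive/data/default/TestAcidGuarantees before DeleteTableProcedure removes the region from hbase:meta. A minimal sketch of inspecting that archive layout with the standard Hadoop FileSystem API follows; the NameNode address and base path are taken from the log, everything else is illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and archive root taken from the log entries above.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42193"), conf);
        Path tableArchive = new Path("/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4"
            + "/archive/data/default/TestAcidGuarantees");
        // Layout mirrors the live table: <region>/<family>/<store file>.
        for (FileStatus region : fs.listStatus(tableArchive)) {
          for (FileStatus family : fs.listStatus(region.getPath())) {
            for (FileStatus file : fs.listStatus(family.getPath())) {
              System.out.println(file.getPath() + " (" + file.getLen() + " bytes)");
            }
          }
        }
        fs.close();
      }
    }
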
2024-12-09T17:22:06,495 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733764926495"}]},"ts":"9223372036854775807"} 2024-12-09T17:22:06,497 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:22:06,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-09T17:22:06,686 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=73, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 250 msec 2024-12-09T17:22:06,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-09T17:22:06,743 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-09T17:22:06,757 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 239) Potentially hanging thread: hconnection-0x7e149453-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_88836206_22 at /127.0.0.1:35378 [Waiting for operation #1491] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1178314848_22 at /127.0.0.1:35612 [Waiting for operation #1076] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1178314848_22 at /127.0.0.1:35572 [Waiting for operation #1099] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_88836206_22 at /127.0.0.1:35608 [Waiting for operation #1080] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x7e149453-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=463 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=379 (was 308) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4312 (was 4277) - AvailableMemoryMB LEAK? - 2024-12-09T17:22:06,767 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=11, AvailableMemoryMB=4311 2024-12-09T17:22:06,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
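The TableDescriptorChecker warning fires because the test sets the per-table flush size to 131072 bytes (128 KB), far below the shipped 128 MB default for "hbase.hregion.memstore.flush.size", so memstores flush very frequently. A minimal, hypothetical sketch of how such a per-table value is set through the client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSizeDescriptor {
      public static void main(String[] args) {
        // 131072 bytes = 128 KB, the value the warning above reports; the shipped
        // default for "hbase.hregion.memstore.flush.size" is 128 MB.
        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(131072L)
            .build();
        System.out.println(descriptor.getMemStoreFlushSize());
      }
    }
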
2024-12-09T17:22:06,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:22:06,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:06,771 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:22:06,771 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:06,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 74 2024-12-09T17:22:06,772 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:22:06,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:06,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742078_1254 (size=963) 2024-12-09T17:22:06,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:07,184 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:22:07,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742079_1255 (size=53) 2024-12-09T17:22:07,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:07,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:22:07,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ba40c75e49d5ed83d5ce6b14bf62ff79, disabling compactions & flushes 2024-12-09T17:22:07,591 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:07,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:07,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. after waiting 0 ms 2024-12-09T17:22:07,591 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:07,591 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
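The create request logged above carries three column families (A, B, C) with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A hedged sketch of an equivalent request issued through the Admin API; connection settings and any attributes not shown are assumptions, not taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute shown in the log: ADAPTIVE in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                .build());
          }
          admin.createTable(builder.build());
        }
      }
    }
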
2024-12-09T17:22:07,592 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:07,594 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:22:07,595 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733764927594"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764927594"}]},"ts":"1733764927594"} 2024-12-09T17:22:07,596 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:22:07,598 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:22:07,598 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764927598"}]},"ts":"1733764927598"} 2024-12-09T17:22:07,599 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:22:07,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, ASSIGN}] 2024-12-09T17:22:07,654 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, ASSIGN 2024-12-09T17:22:07,655 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:22:07,806 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=ba40c75e49d5ed83d5ce6b14bf62ff79, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:07,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; OpenRegionProcedure ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:07,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:07,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:07,967 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:07,967 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(7285): Opening region: {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:22:07,968 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,968 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:22:07,968 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(7327): checking encryption for ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,968 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(7330): checking classloading for ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,970 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,972 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:07,973 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba40c75e49d5ed83d5ce6b14bf62ff79 columnFamilyName A 2024-12-09T17:22:07,973 DEBUG [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:07,974 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(327): Store=ba40c75e49d5ed83d5ce6b14bf62ff79/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:07,974 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,975 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:07,975 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba40c75e49d5ed83d5ce6b14bf62ff79 columnFamilyName B 2024-12-09T17:22:07,975 DEBUG [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:07,976 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(327): Store=ba40c75e49d5ed83d5ce6b14bf62ff79/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:07,976 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,978 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:07,978 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba40c75e49d5ed83d5ce6b14bf62ff79 columnFamilyName C 2024-12-09T17:22:07,978 DEBUG [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:07,979 INFO [StoreOpener-ba40c75e49d5ed83d5ce6b14bf62ff79-1 {}] regionserver.HStore(327): Store=ba40c75e49d5ed83d5ce6b14bf62ff79/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:07,979 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:07,980 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,980 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,982 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:22:07,984 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(1085): writing seq id for ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:07,986 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:22:07,987 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(1102): Opened ba40c75e49d5ed83d5ce6b14bf62ff79; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64159287, jitterRate=-0.04395212233066559}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:22:07,987 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegion(1001): Region open journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:07,988 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., pid=76, masterSystemTime=1733764927962 2024-12-09T17:22:07,990 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:07,990 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=76}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
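Each store (A, B, C) opens with a CompactingMemStore whose compactor is ADAPTIVE, matching the table attribute above. As a sketch only, and assuming the same key is honoured as a site-wide default rather than a per-table attribute (a deployment choice, not something this test does), the policy could be set in configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AdaptiveMemStoreDefault {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Same key the table descriptor above carries as a table-level attribute;
        // placed in hbase-site.xml (or set here) it would act as the default policy.
        conf.set("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        System.out.println(conf.get("hbase.hregion.compacting.memstore.type"));
      }
    }
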
2024-12-09T17:22:07,990 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=ba40c75e49d5ed83d5ce6b14bf62ff79, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:07,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-09T17:22:07,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; OpenRegionProcedure ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 in 182 msec 2024-12-09T17:22:07,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-09T17:22:07,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, ASSIGN in 341 msec 2024-12-09T17:22:07,994 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:22:07,994 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764927994"}]},"ts":"1733764927994"} 2024-12-09T17:22:07,995 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:22:08,009 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:22:08,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2400 sec 2024-12-09T17:22:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-09T17:22:08,883 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 74 completed 2024-12-09T17:22:08,884 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72a7721c to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@faa31c 2024-12-09T17:22:08,897 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@688f4c53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,899 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:08,900 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:08,901 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:22:08,901 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60386, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:22:08,904 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7792c763 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22a568ce 2024-12-09T17:22:08,914 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a451d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,915 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0c8a18c7 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e0e280 2024-12-09T17:22:08,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67f02d8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,927 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45426917 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@473477dd 2024-12-09T17:22:08,939 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21cebefa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,940 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e7fc60d to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a91dc80 2024-12-09T17:22:08,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e7c8846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,952 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e66ea50 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a874cc0 2024-12-09T17:22:08,964 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4093d76e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,966 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6119e7 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31178bc2 2024-12-09T17:22:08,977 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2838b88d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,980 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7507573f to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78439bc6 2024-12-09T17:22:08,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15d2a893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:08,993 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e5c7476 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a2545d0 2024-12-09T17:22:09,006 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab3f837, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:09,007 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df84068 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d039dc2 2024-12-09T17:22:09,018 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2834a215, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:09,020 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x644774bd to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15db087a 2024-12-09T17:22:09,031 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@187234de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:09,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:09,036 DEBUG [hconnection-0x2fa6ce6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,037 DEBUG [hconnection-0x752d985f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-09T17:22:09,038 DEBUG [hconnection-0x25cfb477-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,038 INFO 
[PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:09,039 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,039 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,039 DEBUG [hconnection-0x786575f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-09T17:22:09,039 DEBUG [hconnection-0x1f75c46f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,040 DEBUG [hconnection-0x7883cb07-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,040 DEBUG [hconnection-0x20295484-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,040 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,040 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,040 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,040 DEBUG [hconnection-0x1a57398b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,041 DEBUG [hconnection-0x666c232f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,041 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,041 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,041 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,041 DEBUG [hconnection-0x6a043d24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:09,042 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
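The FlushTableProcedure above (pid=77) is driven by an administrative flush request from the test client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of the corresponding client-side call, assuming a reachable cluster with default configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Issues the same table-level flush that the master logs as a FlushTableProcedure above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
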
2024-12-09T17:22:09,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:09,043 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:09,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:09,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,047 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:09,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/c3d38846300d4689bef0c4bb2132138a is 50, key is test_row_0/A:col10/1733764929046/Put/seqid=0 2024-12-09T17:22:09,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742080_1256 (size=12001) 2024-12-09T17:22:09,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/c3d38846300d4689bef0c4bb2132138a 2024-12-09T17:22:09,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/69287283ec164c598336e58143b06e7a is 50, key is test_row_0/B:col10/1733764929046/Put/seqid=0 2024-12-09T17:22:09,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-09T17:22:09,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742081_1257 (size=12001) 2024-12-09T17:22:09,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/69287283ec164c598336e58143b06e7a 2024-12-09T17:22:09,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/1a95daf5300f4e4c8576b8464c42e659 is 50, key is test_row_0/C:col10/1733764929046/Put/seqid=0 2024-12-09T17:22:09,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742082_1258 (size=12001) 2024-12-09T17:22:09,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-09T17:22:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
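Editor's note, a hedged sketch: the RegionTooBusyException warnings throughout this log come from HRegion.checkResources(), which rejects writes once the region's memstore passes its blocking limit (reported here as 512.0 K). This is a retriable exception, and the stock HBase client already retries it internally; the sketch below only makes an explicit backoff loop visible for illustration. MAX_ATTEMPTS and putWithBackoff are hypothetical names, not from the test.

// Illustrative retry loop; the standard client performs equivalent retries on its own.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BusyRegionRetry {
  private static final int MAX_ATTEMPTS = 5; // hypothetical limit for the sketch

  static void putWithBackoff(Table table, Put put) throws Exception {
    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt == MAX_ATTEMPTS) throw e;
        Thread.sleep(100L * attempt); // crude linear backoff before retrying the write
      }
    }
  }
}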
2024-12-09T17:22:09,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/1a95daf5300f4e4c8576b8464c42e659 2024-12-09T17:22:09,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/c3d38846300d4689bef0c4bb2132138a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a 2024-12-09T17:22:09,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a, entries=150, sequenceid=13, filesize=11.7 K 2024-12-09T17:22:09,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/69287283ec164c598336e58143b06e7a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a 2024-12-09T17:22:09,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a, entries=150, sequenceid=13, filesize=11.7 K 2024-12-09T17:22:09,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/1a95daf5300f4e4c8576b8464c42e659 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659 2024-12-09T17:22:09,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659, entries=150, sequenceid=13, filesize=11.7 K 2024-12-09T17:22:09,227 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-09T17:22:09,227 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-09T17:22:09,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 182ms, sequenceid=13, compaction requested=false 2024-12-09T17:22:09,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:09,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-09T17:22:09,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-09T17:22:09,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,348 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:09,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bf8e603087044d39bb386577018f0fe9 is 50, key is test_row_0/A:col10/1733764929077/Put/seqid=0 2024-12-09T17:22:09,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
as already flushing 2024-12-09T17:22:09,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:09,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742083_1259 (size=12001) 2024-12-09T17:22:09,387 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bf8e603087044d39bb386577018f0fe9 2024-12-09T17:22:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1026f195b1234b9cb022b64e130cca57 is 50, key is test_row_0/B:col10/1733764929077/Put/seqid=0 2024-12-09T17:22:09,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742084_1260 (size=12001) 2024-12-09T17:22:09,418 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1026f195b1234b9cb022b64e130cca57 2024-12-09T17:22:09,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f7bcbc128780499c9f05a139e0c6dfa6 is 50, key is test_row_0/C:col10/1733764929077/Put/seqid=0 2024-12-09T17:22:09,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742085_1261 (size=12001) 2024-12-09T17:22:09,445 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f7bcbc128780499c9f05a139e0c6dfa6 2024-12-09T17:22:09,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bf8e603087044d39bb386577018f0fe9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9 
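Editor's note, a simplified illustration of the pattern the HRegionFileSystem(442) "Committing .tmp/... as ..." lines record: the flusher writes each new HFile under the region's .tmp directory and only then renames it into the column family directory, so readers never see a partially written file. This is a generic Hadoop FileSystem sketch under placeholder paths, not HBase's actual HRegionFileSystem code.

// Write-then-commit sketch: stage the file in .tmp, then rename into place.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/table/region/.tmp/A/newfile"); // placeholder paths
    Path dst = new Path("/table/region/A/newfile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("flushed cells would go here");     // stand-in for HFile contents
    }
    // rename is atomic within a single HDFS namespace, which makes the commit safe.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit of " + tmp + " to " + dst + " failed");
    }
  }
}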
2024-12-09T17:22:09,456 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9, entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:22:09,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1026f195b1234b9cb022b64e130cca57 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57 2024-12-09T17:22:09,462 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57, entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:22:09,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f7bcbc128780499c9f05a139e0c6dfa6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6 2024-12-09T17:22:09,470 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6, entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:22:09,472 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 125ms, sequenceid=39, compaction requested=false 2024-12-09T17:22:09,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:09,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
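Editor's note, a hedged sketch: the "Over memstore limit=512.0 K" ceiling seen in the RegionTooBusyException entries that continue below is the region's blocking memstore size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values this test actually configures are not visible in the excerpt; the numbers below are only one combination that yields 512 K (128 K x 4).

// Illustrative configuration only; the test's real settings are not shown in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (illustrative)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x = 512 K
    return conf;
  }
}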
2024-12-09T17:22:09,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-09T17:22:09,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-09T17:22:09,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-09T17:22:09,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 431 msec 2024-12-09T17:22:09,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 439 msec 2024-12-09T17:22:09,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:09,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:09,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:09,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/be63db0d02d0461493924aa62f1527e2 is 50, key is test_row_0/A:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:09,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742086_1262 (size=12001) 2024-12-09T17:22:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-09T17:22:09,642 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-09T17:22:09,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-09T17:22:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:09,645 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:09,645 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:09,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:09,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:09,797 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-09T17:22:09,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:09,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
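[Editor's note] The RegionTooBusyException warnings above and below are raised by HRegion.checkResources once the region's memstore passes its write-blocking limit, reported here as 512.0 K. In a stock setup that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small value is presumably a deliberately low flush size configured by this test to force frequent flushes. The following minimal sketch (illustrative values, not necessarily the ones this run used) shows how the two knobs combine:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    // Standard HBase configuration as a starting point.
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore at 128 KB (illustrative; the default is 128 MB) and block further
    // writes with RegionTooBusyException once the region memstore reaches
    // flush.size * block.multiplier = 128 KB * 4 = 512 KB, matching the limit in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("write-blocking limit = " + blockingLimit + " bytes");
  }
}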
2024-12-09T17:22:09,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764989837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764989837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764989842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764989842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:09,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764989842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/be63db0d02d0461493924aa62f1527e2 2024-12-09T17:22:09,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b613e3e6bff842019dea6bb0214ad3bc is 50, key is test_row_0/B:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:09,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:09,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-09T17:22:09,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
as already flushing 2024-12-09T17:22:09,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:09,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:09,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742087_1263 (size=12001) 2024-12-09T17:22:09,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b613e3e6bff842019dea6bb0214ad3bc 2024-12-09T17:22:09,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/32e55fbdf2fe46acb0306dc36b10c472 is 50, key is test_row_0/C:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:09,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742088_1264 (size=12001) 2024-12-09T17:22:10,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-09T17:22:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
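[Editor's note] The FlushTableProcedure (pid=79) and its FlushRegionCallable subprocedure (pid=80) seen here are driven by an external flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); because the region is already flushing, the region-server callable keeps failing with "Unable to complete flush" and the master re-dispatches it until the in-progress flush completes. A minimal sketch of how such a flush is requested through the public Admin API (table name taken from the log, error handling omitted):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log that request shows
      // up as a FlushTableProcedure with a FlushRegionCallable dispatched per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}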
2024-12-09T17:22:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
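[Editor's note] The rejected Mutate calls in the surrounding entries come from writer clients putting the same rows (e.g. test_row_0, qualifier col10) into the three column families A, B and C visible in the flush output paths. Below is a minimal sketch of such a writer that backs off while the region reports itself too busy; the row, family and qualifier names mirror the log, while the explicit retry loop is an illustrative assumption (the stock client also retries such failures internally):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Write the same cell to all three families, mirroring the A/B/C stores in the log.
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back under the blocking limit
        } catch (IOException e) {
          // Typically a RegionTooBusyException (possibly wrapped by the client's own retry
          // machinery); wait for the ongoing flush before trying again.
          if (attempt == 9) {
            throw e;
          }
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}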
2024-12-09T17:22:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:10,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764990139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764990139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764990143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764990144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764990144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:10,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-09T17:22:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:10,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:10,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/32e55fbdf2fe46acb0306dc36b10c472 2024-12-09T17:22:10,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/be63db0d02d0461493924aa62f1527e2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2 2024-12-09T17:22:10,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2, entries=150, sequenceid=51, filesize=11.7 K 2024-12-09T17:22:10,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b613e3e6bff842019dea6bb0214ad3bc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc 2024-12-09T17:22:10,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc, entries=150, sequenceid=51, 
filesize=11.7 K 2024-12-09T17:22:10,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/32e55fbdf2fe46acb0306dc36b10c472 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472 2024-12-09T17:22:10,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472, entries=150, sequenceid=51, filesize=11.7 K 2024-12-09T17:22:10,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 890ms, sequenceid=51, compaction requested=true 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:10,389 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:10,389 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:10,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 
ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:10,390 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,390 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,390 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.2 K 2024-12-09T17:22:10,390 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.2 K 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 69287283ec164c598336e58143b06e7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764929045 2024-12-09T17:22:10,390 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3d38846300d4689bef0c4bb2132138a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764929045 2024-12-09T17:22:10,391 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1026f195b1234b9cb022b64e130cca57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764929068 2024-12-09T17:22:10,391 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf8e603087044d39bb386577018f0fe9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764929068 2024-12-09T17:22:10,391 DEBUG 
[RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting be63db0d02d0461493924aa62f1527e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:10,391 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b613e3e6bff842019dea6bb0214ad3bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:10,397 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:10,398 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ef9769cd002046bf80ec44753401c2c8 is 50, key is test_row_0/B:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:10,400 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#217 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:10,401 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/018e484a14124d0bbadb8ae2398a1830 is 50, key is test_row_0/A:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:10,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742089_1265 (size=12104) 2024-12-09T17:22:10,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742090_1266 (size=12104) 2024-12-09T17:22:10,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-09T17:22:10,408 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/018e484a14124d0bbadb8ae2398a1830 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/018e484a14124d0bbadb8ae2398a1830 2024-12-09T17:22:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:10,408 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:22:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,420 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 018e484a14124d0bbadb8ae2398a1830(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:10,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:10,420 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764930389; duration=0sec 2024-12-09T17:22:10,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:10,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:10,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:10,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:10,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:10,421 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,421 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.2 K 2024-12-09T17:22:10,422 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a95daf5300f4e4c8576b8464c42e659, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733764929045 2024-12-09T17:22:10,422 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7bcbc128780499c9f05a139e0c6dfa6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764929068 2024-12-09T17:22:10,422 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32e55fbdf2fe46acb0306dc36b10c472, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:10,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/4b69e42f3e2f429b911335ed6869a5d2 is 50, key is test_row_0/A:col10/1733764929529/Put/seqid=0 2024-12-09T17:22:10,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742091_1267 (size=12001) 2024-12-09T17:22:10,428 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/4b69e42f3e2f429b911335ed6869a5d2 2024-12-09T17:22:10,430 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:10,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/ef421f688ce9438f837060d7f164b55e is 50, key is test_row_0/C:col10/1733764929391/Put/seqid=0 2024-12-09T17:22:10,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742092_1268 (size=12104) 2024-12-09T17:22:10,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b6c4ff2d32124b78a6d77d0812e56952 is 50, key is test_row_0/B:col10/1733764929529/Put/seqid=0 2024-12-09T17:22:10,440 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/ef421f688ce9438f837060d7f164b55e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/ef421f688ce9438f837060d7f164b55e 2024-12-09T17:22:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742093_1269 (size=12001) 2024-12-09T17:22:10,443 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b6c4ff2d32124b78a6d77d0812e56952 2024-12-09T17:22:10,447 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 
ef421f688ce9438f837060d7f164b55e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:10,447 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:10,447 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764930389; duration=0sec 2024-12-09T17:22:10,447 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:10,447 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:10,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/53b9913ee08e4df19fa6fbe5b734c341 is 50, key is test_row_0/C:col10/1733764929529/Put/seqid=0 2024-12-09T17:22:10,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742094_1270 (size=12001) 2024-12-09T17:22:10,555 DEBUG [master/80c69eb3c456:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ad9a19a7365c7aeecc9593a7078cfd44 changed from -1.0 to 0.0, refreshing cache 2024-12-09T17:22:10,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:10,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:10,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764990649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764990649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764990650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764990651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764990651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:10,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764990752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764990752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764990753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764990754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764990754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,805 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ef9769cd002046bf80ec44753401c2c8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ef9769cd002046bf80ec44753401c2c8 2024-12-09T17:22:10,809 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into ef9769cd002046bf80ec44753401c2c8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:10,809 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:10,809 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764930389; duration=0sec 2024-12-09T17:22:10,809 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:10,809 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:10,861 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/53b9913ee08e4df19fa6fbe5b734c341 2024-12-09T17:22:10,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/4b69e42f3e2f429b911335ed6869a5d2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2 2024-12-09T17:22:10,868 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2, entries=150, sequenceid=76, filesize=11.7 K 2024-12-09T17:22:10,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b6c4ff2d32124b78a6d77d0812e56952 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952 2024-12-09T17:22:10,873 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952, entries=150, sequenceid=76, filesize=11.7 K 2024-12-09T17:22:10,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/53b9913ee08e4df19fa6fbe5b734c341 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341 2024-12-09T17:22:10,877 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341, entries=150, sequenceid=76, filesize=11.7 K 2024-12-09T17:22:10,878 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 470ms, sequenceid=76, compaction requested=false 2024-12-09T17:22:10,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:10,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:10,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-09T17:22:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-09T17:22:10,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-09T17:22:10,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2340 sec 2024-12-09T17:22:10,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.2370 sec 2024-12-09T17:22:10,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:10,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:22:10,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:10,955 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:10,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:10,959 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bb9ec014f5f44cf7a4f68a77454a4507 is 50, key is test_row_0/A:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:10,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742095_1271 (size=12001) 2024-12-09T17:22:10,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bb9ec014f5f44cf7a4f68a77454a4507 2024-12-09T17:22:10,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/6647eeb5df8b40ea87ccde3cc04deb4c is 50, key is test_row_0/B:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:10,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764990980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764990981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764990981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:10,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764990982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764990982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:10,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742096_1272 (size=12001) 2024-12-09T17:22:11,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764991084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764991085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764991085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764991085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764991085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764991287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764991288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764991288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764991289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764991292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/6647eeb5df8b40ea87ccde3cc04deb4c 2024-12-09T17:22:11,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/8fc127fbbff34ebfa54fec4c11f44402 is 50, key is test_row_0/C:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:11,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742097_1273 (size=12001) 2024-12-09T17:22:11,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764991590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764991590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764991590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764991592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764991595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,683 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:22:11,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-09T17:22:11,748 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-09T17:22:11,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-09T17:22:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:11,750 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:11,751 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:11,751 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:11,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/8fc127fbbff34ebfa54fec4c11f44402 2024-12-09T17:22:11,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/bb9ec014f5f44cf7a4f68a77454a4507 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507 2024-12-09T17:22:11,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507, entries=150, sequenceid=92, filesize=11.7 K 2024-12-09T17:22:11,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/6647eeb5df8b40ea87ccde3cc04deb4c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c 2024-12-09T17:22:11,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c, entries=150, sequenceid=92, filesize=11.7 K 2024-12-09T17:22:11,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/8fc127fbbff34ebfa54fec4c11f44402 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402 2024-12-09T17:22:11,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402, entries=150, sequenceid=92, filesize=11.7 K 2024-12-09T17:22:11,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 881ms, sequenceid=92, compaction requested=true 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:11,837 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:11,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:11,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:11,838 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:11,838 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:11,838 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:11,838 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:11,838 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:11,838 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
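The compaction decisions in the entries above follow the standard store-compaction thresholds: SortedCompactionPolicy reports 3 eligible store files against a blocking count of 16, and ExploringCompactionPolicy then selects all 3 flush outputs for a minor compaction. A minimal sketch of the configuration knobs behind those numbers, using the stock property names with illustrative values (this excerpt does not show which of them the test actually overrides):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible HFiles before a minor compaction is considered;
            // with 3, each store compacts as soon as three flush files accumulate, as seen above.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on the number of files merged by a single minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Writes to the region are delayed once a store reaches this many files
            // (the "16 blocking" figure reported by SortedCompactionPolicy).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }
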
2024-12-09T17:22:11,838 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ef9769cd002046bf80ec44753401c2c8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.3 K 2024-12-09T17:22:11,838 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/018e484a14124d0bbadb8ae2398a1830, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.3 K 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ef9769cd002046bf80ec44753401c2c8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 018e484a14124d0bbadb8ae2398a1830, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b69e42f3e2f429b911335ed6869a5d2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733764929523 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b6c4ff2d32124b78a6d77d0812e56952, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733764929523 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb9ec014f5f44cf7a4f68a77454a4507, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:11,839 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6647eeb5df8b40ea87ccde3cc04deb4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:11,848 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:11,848 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:11,849 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/0067abf942c844cb9cf3ee5543f83eec is 50, key is test_row_0/A:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:11,849 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/e216badc9d034f12ac26c4538b210a78 is 50, key is test_row_0/B:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742098_1274 (size=12207) 2024-12-09T17:22:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742099_1275 (size=12207) 2024-12-09T17:22:11,872 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/e216badc9d034f12ac26c4538b210a78 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e216badc9d034f12ac26c4538b210a78 2024-12-09T17:22:11,872 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/0067abf942c844cb9cf3ee5543f83eec as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0067abf942c844cb9cf3ee5543f83eec 2024-12-09T17:22:11,877 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 0067abf942c844cb9cf3ee5543f83eec(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:11,877 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:11,877 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into e216badc9d034f12ac26c4538b210a78(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:11,878 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764931837; duration=0sec 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:11,878 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764931837; duration=0sec 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:11,878 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:11,879 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:11,879 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:11,879 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
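The completed-compaction entries above, and the C-store compaction that starts below, are queued and run entirely by the region server's CompactSplit threads after each flush; no client action is involved. A client can still observe or request compactions through the Admin API. A minimal sketch, assuming a running cluster reachable with the default client configuration and using this test's table name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionStateSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Ask every region of the table to queue a minor compaction.
                admin.compact(table);
                // Poll whether any region of the table is currently compacting.
                CompactionState state = admin.getCompactionState(table);
                System.out.println("Compaction state of " + table + ": " + state);
            }
        }
    }
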
2024-12-09T17:22:11,879 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/ef421f688ce9438f837060d7f164b55e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.3 K 2024-12-09T17:22:11,879 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef421f688ce9438f837060d7f164b55e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764929389 2024-12-09T17:22:11,880 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53b9913ee08e4df19fa6fbe5b734c341, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733764929523 2024-12-09T17:22:11,880 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fc127fbbff34ebfa54fec4c11f44402, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:11,890 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#227 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:11,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/97ca08739cd747e39d5634ecacf6453e is 50, key is test_row_0/C:col10/1733764930649/Put/seqid=0 2024-12-09T17:22:11,902 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:11,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-09T17:22:11,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
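The flush that follows was requested by a client rather than triggered by memstore pressure: the master entries further up show Client=jenkins calling flush on TestAcidGuarantees, the resulting FlushTableProcedure (pid=81) spawning a FlushRegionProcedure (pid=82), and MasterRpcServices being polled until the procedure is done. A minimal sketch of the client side of that exchange, assuming a running cluster reachable with the default client configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Flush every region of the table; the master drives this as a table-flush
                // procedure and the call returns once the procedure completes.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
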
2024-12-09T17:22:11,903 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:22:11,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:11,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:11,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:11,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:11,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:11,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:11,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742100_1276 (size=12207) 2024-12-09T17:22:11,912 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/97ca08739cd747e39d5634ecacf6453e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/97ca08739cd747e39d5634ecacf6453e 2024-12-09T17:22:11,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/9e8c1eb36f4f47b0aba00de02f0da806 is 50, key is test_row_0/A:col10/1733764930981/Put/seqid=0 2024-12-09T17:22:11,917 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 97ca08739cd747e39d5634ecacf6453e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
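The RegionTooBusyException warnings that resume below, like the ones earlier in this log, come from HRegion.checkResources rejecting mutations while the region's memstore is above its blocking threshold, reported here as 512.0 K. That threshold is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, which would match, for example, a 128 K flush size with the default multiplier of 4; the test's actual overrides are not visible in this excerpt, so the values below are illustrative assumptions. The standard client retries these rejections internally, which is why the Mutate calls keep reappearing with increasing callIds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it holds roughly this many bytes (illustrative: 128 K).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Reject writes with RegionTooBusyException once the memstore reaches
            // flush.size * multiplier (128 K * 4 = 512 K, the limit reported in this log).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }
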
2024-12-09T17:22:11,917 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:11,917 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764931837; duration=0sec 2024-12-09T17:22:11,917 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:11,917 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:11,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742101_1277 (size=12001) 2024-12-09T17:22:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:12,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764992099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764992099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764992100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764992101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764992101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764992202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764992202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764992202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764992204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764992207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,322 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/9e8c1eb36f4f47b0aba00de02f0da806 2024-12-09T17:22:12,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b1c7f55f6ef14a55ad430feb81d54df8 is 50, key is test_row_0/B:col10/1733764930981/Put/seqid=0 2024-12-09T17:22:12,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742102_1278 (size=12001) 2024-12-09T17:22:12,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:12,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764992404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764992404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764992404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764992406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764992409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764992707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764992708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764992708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764992709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:12,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764992713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:12,733 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b1c7f55f6ef14a55ad430feb81d54df8 2024-12-09T17:22:12,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bda8b0d929c445ed9e6bcb0736f4814f is 50, key is test_row_0/C:col10/1733764930981/Put/seqid=0 2024-12-09T17:22:12,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742103_1279 (size=12001) 2024-12-09T17:22:12,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:13,143 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bda8b0d929c445ed9e6bcb0736f4814f 2024-12-09T17:22:13,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/9e8c1eb36f4f47b0aba00de02f0da806 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806 2024-12-09T17:22:13,151 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806, entries=150, sequenceid=119, filesize=11.7 K 2024-12-09T17:22:13,152 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b1c7f55f6ef14a55ad430feb81d54df8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8 2024-12-09T17:22:13,155 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8, entries=150, sequenceid=119, filesize=11.7 K 2024-12-09T17:22:13,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bda8b0d929c445ed9e6bcb0736f4814f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f 2024-12-09T17:22:13,159 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f, entries=150, sequenceid=119, filesize=11.7 K 2024-12-09T17:22:13,160 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1257ms, sequenceid=119, compaction requested=false 2024-12-09T17:22:13,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:13,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:13,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-09T17:22:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-09T17:22:13,162 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-09T17:22:13,162 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4100 sec 2024-12-09T17:22:13,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.4130 sec 2024-12-09T17:22:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:13,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:22:13,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:13,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:13,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/290ef2d9c9da41a5b429ea88e3fc7ea2 is 50, key is test_row_0/A:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:13,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742104_1280 (size=12151) 2024-12-09T17:22:13,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764993226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764993227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764993226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764993228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764993230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764993331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764993331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764993331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764993331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764993333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764993533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764993533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764993534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764993534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764993535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/290ef2d9c9da41a5b429ea88e3fc7ea2 2024-12-09T17:22:13,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 is 50, key is test_row_0/B:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:13,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742105_1281 (size=12151) 2024-12-09T17:22:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764993836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764993836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764993836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764993837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764993838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-09T17:22:13,854 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-09T17:22:13,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-09T17:22:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-09T17:22:13,856 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:13,856 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:13,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:13,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=83 2024-12-09T17:22:14,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-09T17:22:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:14,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:14,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 2024-12-09T17:22:14,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bf355dbc89904528a2494a72cc50f1c0 is 50, key is test_row_0/C:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:14,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742106_1282 (size=12151) 2024-12-09T17:22:14,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-09T17:22:14,160 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-09T17:22:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:14,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:14,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-09T17:22:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764994339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:14,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764994339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764994339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:14,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764994340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:14,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764994343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-09T17:22:14,464 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-09T17:22:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:14,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bf355dbc89904528a2494a72cc50f1c0 2024-12-09T17:22:14,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/290ef2d9c9da41a5b429ea88e3fc7ea2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2 2024-12-09T17:22:14,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2, entries=150, sequenceid=132, filesize=11.9 K 2024-12-09T17:22:14,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 2024-12-09T17:22:14,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3, entries=150, 
sequenceid=132, filesize=11.9 K 2024-12-09T17:22:14,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/bf355dbc89904528a2494a72cc50f1c0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0 2024-12-09T17:22:14,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0, entries=150, sequenceid=132, filesize=11.9 K 2024-12-09T17:22:14,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1307ms, sequenceid=132, compaction requested=true 2024-12-09T17:22:14,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:14,521 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:14,521 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:14,521 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] 
regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:14,522 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,522 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,522 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e216badc9d034f12ac26c4538b210a78, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.5 K 2024-12-09T17:22:14,522 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0067abf942c844cb9cf3ee5543f83eec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.5 K 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0067abf942c844cb9cf3ee5543f83eec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:14,522 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e216badc9d034f12ac26c4538b210a78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:14,523 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b1c7f55f6ef14a55ad430feb81d54df8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733764930976 2024-12-09T17:22:14,523 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e8c1eb36f4f47b0aba00de02f0da806, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733764930976 
2024-12-09T17:22:14,523 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d9ecb68c8e2e406caa6604bc3ec6b6f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:14,523 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 290ef2d9c9da41a5b429ea88e3fc7ea2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:14,529 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:14,529 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:14,530 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/19d4897a9b4640c485ccc6c86124a00c is 50, key is test_row_0/A:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:14,531 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/7f26fa13612941a0a3918b241486792d is 50, key is test_row_0/B:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:14,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742107_1283 (size=12459) 2024-12-09T17:22:14,571 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/19d4897a9b4640c485ccc6c86124a00c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/19d4897a9b4640c485ccc6c86124a00c 2024-12-09T17:22:14,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742108_1284 (size=12459) 2024-12-09T17:22:14,578 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 19d4897a9b4640c485ccc6c86124a00c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
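The records above show SortedCompactionPolicy finding 3 eligible store files and ExploringCompactionPolicy selecting all 3 (11.9 K, 11.7 K and 11.9 K, totalSize=35.5 K) for minor compaction of stores A and B. A minimal sketch of the size-ratio rule this selection is generally understood to apply — every file in the candidate set must be no larger than ratio × (sum of the other files), with 1.2 as the usual hbase.hstore.compaction.ratio default; this is an illustration using the file sizes reported in the log, not the HBase source:

```java
// Hedged sketch (not the HBase implementation) of the "files in ratio" test
// behind "Exploring compaction algorithm has selected 3 files ... 1 in ratio".
// Sizes are the 11.9 K / 11.7 K / 11.9 K store files listed above.
public class FilesInRatioSketch {
  static boolean filesInRatio(double[] sizesKb, double ratio) {
    double total = 0;
    for (double s : sizesKb) total += s;
    for (double s : sizesKb) {
      if (s > (total - s) * ratio) {
        return false; // one file dominates the set; this permutation is rejected
      }
    }
    return true;
  }

  public static void main(String[] args) {
    double[] candidate = {11.9, 11.7, 11.9};
    System.out.println(filesInRatio(candidate, 1.2)); // true: all 3 files compact together
  }
}
```

With three near-equal files the rule holds trivially, which is consistent with the policy reporting a single permutation considered and all 3 files selected.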
2024-12-09T17:22:14,578 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:14,578 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764934521; duration=0sec 2024-12-09T17:22:14,578 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:14,578 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:14,578 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:14,580 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:14,580 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:14,580 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:14,580 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/97ca08739cd747e39d5634ecacf6453e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.5 K 2024-12-09T17:22:14,580 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97ca08739cd747e39d5634ecacf6453e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733764930649 2024-12-09T17:22:14,581 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bda8b0d929c445ed9e6bcb0736f4814f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733764930976 2024-12-09T17:22:14,581 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf355dbc89904528a2494a72cc50f1c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:14,583 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/7f26fa13612941a0a3918b241486792d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/7f26fa13612941a0a3918b241486792d 2024-12-09T17:22:14,588 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:14,588 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 7f26fa13612941a0a3918b241486792d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:14,588 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:14,588 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764934521; duration=0sec 2024-12-09T17:22:14,588 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:14,588 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:14,588 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f36fe77004164a4e8ab2117c7a551b3b is 50, key is test_row_0/C:col10/1733764933212/Put/seqid=0 2024-12-09T17:22:14,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742109_1285 (size=12459) 2024-12-09T17:22:14,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:14,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-09T17:22:14,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
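The pid=83/pid=84 records in this section (FlushTableProcedure on the master, FlushRegionCallable dispatched to the region server, and the periodic "Checking to see if procedure is done pid=83" polls) are the server side of an administrative table flush. A minimal client-side sketch of the call that kicks off this procedure chain; the connection setup is a placeholder and only the table name is taken from the log:

```java
// Hedged sketch of the Admin call that drives the FlushTableProcedure /
// FlushRegionCallable pair seen above (pid=83 / pid=84).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // placeholder: assumes a reachable cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master, which then dispatches
      // FlushRegionCallable to the hosting region server, as logged above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Note the first dispatch of pid=84 earlier in the log failed with "NOT flushing ... as already flushing"; the retry seen here is what eventually lets pid=83 finish.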
2024-12-09T17:22:14,617 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:14,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:14,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/69097445bfda4f4c899c0254b83625b8 is 50, key is test_row_0/A:col10/1733764933229/Put/seqid=0 2024-12-09T17:22:14,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742110_1286 (size=12151) 2024-12-09T17:22:14,629 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/69097445bfda4f4c899c0254b83625b8 2024-12-09T17:22:14,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/a3686a301dde4939bdeba5628c7ace11 is 50, key is test_row_0/B:col10/1733764933229/Put/seqid=0 2024-12-09T17:22:14,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742111_1287 (size=12151) 2024-12-09T17:22:14,641 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), 
to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/a3686a301dde4939bdeba5628c7ace11 2024-12-09T17:22:14,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/66d4b4a363db4c47bd00f10039b5d2d6 is 50, key is test_row_0/C:col10/1733764933229/Put/seqid=0 2024-12-09T17:22:14,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742112_1288 (size=12151) 2024-12-09T17:22:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-09T17:22:15,001 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f36fe77004164a4e8ab2117c7a551b3b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f36fe77004164a4e8ab2117c7a551b3b 2024-12-09T17:22:15,005 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into f36fe77004164a4e8ab2117c7a551b3b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
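The repeated "Over memstore limit=512.0 K" rejections above and below come from HRegion.checkResources blocking writes while the per-region memstore is over its blocking threshold, i.e. the memstore flush size multiplied by the block multiplier. A minimal sketch of that arithmetic; the fallback values are the usual defaults (128 MB flush size, multiplier 4), and the 512 K figure in this log is assumed to come from deliberately shrunken test settings:

```java
// Hedged sketch: the blocking threshold behind RegionTooBusyException
// ("Over memstore limit=512.0 K"). Exact test-time settings are assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fallbacks are the usual defaults: 128 MB flush size, block multiplier 4.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134_217_728L);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Puts fail with RegionTooBusyException once a region's memstore exceeds "
        + (flushSize * multiplier) + " bytes");
  }
}
```

The standard HBase client generally retries RegionTooBusyException, which is consistent with the same client connections reappearing in the CallRunner records with fresh callIds and later deadlines once the flush in this section frees memstore space.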
2024-12-09T17:22:15,005 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:15,005 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764934521; duration=0sec 2024-12-09T17:22:15,005 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:15,005 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:15,057 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/66d4b4a363db4c47bd00f10039b5d2d6 2024-12-09T17:22:15,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/69097445bfda4f4c899c0254b83625b8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8 2024-12-09T17:22:15,064 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8, entries=150, sequenceid=158, filesize=11.9 K 2024-12-09T17:22:15,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/a3686a301dde4939bdeba5628c7ace11 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11 2024-12-09T17:22:15,069 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11, entries=150, sequenceid=158, filesize=11.9 K 2024-12-09T17:22:15,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/66d4b4a363db4c47bd00f10039b5d2d6 
as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6 2024-12-09T17:22:15,073 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6, entries=150, sequenceid=158, filesize=11.9 K 2024-12-09T17:22:15,074 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 457ms, sequenceid=158, compaction requested=false 2024-12-09T17:22:15,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:15,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:15,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-09T17:22:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-09T17:22:15,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-09T17:22:15,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2200 sec 2024-12-09T17:22:15,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.2230 sec 2024-12-09T17:22:15,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:15,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:15,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:15,353 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/a0f6bb0620c64c42b16d00c613ed0754 is 50, key is test_row_0/A:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:15,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742113_1289 (size=16931) 2024-12-09T17:22:15,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764995360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764995361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764995361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764995362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764995362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764995463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764995464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764995464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764995464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764995465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764995665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764995665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764995666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764995669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764995669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/a0f6bb0620c64c42b16d00c613ed0754 2024-12-09T17:22:15,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/759daad050c5474d8c9766f1b42bf422 is 50, key is test_row_0/B:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:15,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742114_1290 (size=12151) 2024-12-09T17:22:15,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/759daad050c5474d8c9766f1b42bf422 2024-12-09T17:22:15,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6c7280be1f944d91830487a8e7cb2111 is 50, key is test_row_0/C:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:15,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742115_1291 (size=12151) 2024-12-09T17:22:15,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-09T17:22:15,959 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-09T17:22:15,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:15,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-09T17:22:15,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:15,961 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:15,961 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:15,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:15,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764995968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764995969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764995970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764995972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:15,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:15,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764995973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:16,112 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-09T17:22:16,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:16,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:16,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:16,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:16,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:16,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:16,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6c7280be1f944d91830487a8e7cb2111 2024-12-09T17:22:16,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/a0f6bb0620c64c42b16d00c613ed0754 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754 2024-12-09T17:22:16,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754, entries=250, sequenceid=171, filesize=16.5 K 2024-12-09T17:22:16,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/759daad050c5474d8c9766f1b42bf422 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422 2024-12-09T17:22:16,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422, entries=150, sequenceid=171, filesize=11.9 K 2024-12-09T17:22:16,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6c7280be1f944d91830487a8e7cb2111 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111 2024-12-09T17:22:16,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111, entries=150, sequenceid=171, filesize=11.9 K 2024-12-09T17:22:16,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 843ms, sequenceid=171, compaction requested=true 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:16,193 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:16,193 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:16,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:16,194 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:16,194 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:16,194 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/19d4897a9b4640c485ccc6c86124a00c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=40.6 K 2024-12-09T17:22:16,194 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/7f26fa13612941a0a3918b241486792d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.9 K 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19d4897a9b4640c485ccc6c86124a00c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f26fa13612941a0a3918b241486792d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69097445bfda4f4c899c0254b83625b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733764933226 2024-12-09T17:22:16,194 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a3686a301dde4939bdeba5628c7ace11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733764933226 2024-12-09T17:22:16,195 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0f6bb0620c64c42b16d00c613ed0754, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:16,195 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 759daad050c5474d8c9766f1b42bf422, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:16,202 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:16,202 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:16,202 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/0935f5715e3f4a4f98afe5dd89751b4e is 50, key is test_row_0/A:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:16,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/c22fb3018cdd4ffc9a21e838b8f38c63 is 50, key is test_row_0/B:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:16,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742116_1292 (size=12561) 2024-12-09T17:22:16,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742117_1293 (size=12561) 2024-12-09T17:22:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:16,265 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-09T17:22:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:16,265 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:16,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:16,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/dd8c87088ed34d6ea8c3802f39623bec is 50, key is test_row_0/A:col10/1733764935361/Put/seqid=0 2024-12-09T17:22:16,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742118_1294 (size=12151) 2024-12-09T17:22:16,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:16,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:16,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764996476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764996477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764996477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764996478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764996479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:16,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764996579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764996579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764996581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764996581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,612 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/0935f5715e3f4a4f98afe5dd89751b4e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0935f5715e3f4a4f98afe5dd89751b4e 2024-12-09T17:22:16,616 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 0935f5715e3f4a4f98afe5dd89751b4e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:16,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:16,616 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764936193; duration=0sec 2024-12-09T17:22:16,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:16,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:16,616 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:16,617 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:16,617 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:16,617 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:16,617 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f36fe77004164a4e8ab2117c7a551b3b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=35.9 K 2024-12-09T17:22:16,618 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f36fe77004164a4e8ab2117c7a551b3b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733764932100 2024-12-09T17:22:16,618 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66d4b4a363db4c47bd00f10039b5d2d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733764933226 2024-12-09T17:22:16,619 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c7280be1f944d91830487a8e7cb2111, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:16,622 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/c22fb3018cdd4ffc9a21e838b8f38c63 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c22fb3018cdd4ffc9a21e838b8f38c63 2024-12-09T17:22:16,625 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:16,626 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/aee9f7e8ea67490c958df30b6d7139c8 is 50, key is test_row_0/C:col10/1733764935349/Put/seqid=0 2024-12-09T17:22:16,627 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into c22fb3018cdd4ffc9a21e838b8f38c63(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:16,627 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:16,627 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764936193; duration=0sec 2024-12-09T17:22:16,627 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:16,627 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742119_1295 (size=12561) 2024-12-09T17:22:16,673 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/dd8c87088ed34d6ea8c3802f39623bec 2024-12-09T17:22:16,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b31374953ba643c49c09a98462c629df is 50, key is test_row_0/B:col10/1733764935361/Put/seqid=0 2024-12-09T17:22:16,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742120_1296 (size=12151) 
2024-12-09T17:22:16,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764996782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764996782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764996785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:16,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:16,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764996785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,046 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/aee9f7e8ea67490c958df30b6d7139c8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aee9f7e8ea67490c958df30b6d7139c8 2024-12-09T17:22:17,050 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into aee9f7e8ea67490c958df30b6d7139c8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:17,050 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:17,050 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764936193; duration=0sec 2024-12-09T17:22:17,050 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:17,051 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:17,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:17,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764997083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764997084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,087 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b31374953ba643c49c09a98462c629df 2024-12-09T17:22:17,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764997086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764997086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/68df5d6307ab4aadb1f55dc3bdb284f5 is 50, key is test_row_0/C:col10/1733764935361/Put/seqid=0 2024-12-09T17:22:17,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742121_1297 (size=12151) 2024-12-09T17:22:17,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764997491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,498 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/68df5d6307ab4aadb1f55dc3bdb284f5 2024-12-09T17:22:17,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/dd8c87088ed34d6ea8c3802f39623bec as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec 2024-12-09T17:22:17,505 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec, entries=150, sequenceid=195, filesize=11.9 K 2024-12-09T17:22:17,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/b31374953ba643c49c09a98462c629df as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df 2024-12-09T17:22:17,509 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df, entries=150, sequenceid=195, filesize=11.9 K 2024-12-09T17:22:17,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/68df5d6307ab4aadb1f55dc3bdb284f5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5 2024-12-09T17:22:17,513 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5, entries=150, sequenceid=195, filesize=11.9 K 2024-12-09T17:22:17,514 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1249ms, sequenceid=195, compaction requested=false 2024-12-09T17:22:17,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:17,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:17,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-09T17:22:17,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-09T17:22:17,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-09T17:22:17,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5540 sec 2024-12-09T17:22:17,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.5560 sec 2024-12-09T17:22:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:17,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:17,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:17,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/63e2556201df469ea100c98043f7db40 is 50, key is test_row_0/A:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:17,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742122_1298 (size=12151) 2024-12-09T17:22:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764997602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764997602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764997603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764997603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764997706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764997706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764997706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764997706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764997907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764997907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764997908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:17,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764997909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:17,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/63e2556201df469ea100c98043f7db40 2024-12-09T17:22:18,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ff080486f786439399e9b1fcef8dbe70 is 50, key is test_row_0/B:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:18,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742123_1299 (size=12151) 2024-12-09T17:22:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-09T17:22:18,064 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-09T17:22:18,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:18,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-09T17:22:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:18,066 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:18,066 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:18,066 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-09T17:22:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:18,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764998210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764998211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764998211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764998211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:18,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:18,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:18,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:18,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ff080486f786439399e9b1fcef8dbe70 2024-12-09T17:22:18,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/aea0611db49e4a83b4c1e400bdea74a1 is 50, key is test_row_0/C:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:18,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742124_1300 (size=12151) 2024-12-09T17:22:18,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:18,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:18,674 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764998715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764998716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764998716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764998716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/aea0611db49e4a83b4c1e400bdea74a1 2024-12-09T17:22:18,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/63e2556201df469ea100c98043f7db40 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40 2024-12-09T17:22:18,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40, entries=150, sequenceid=211, filesize=11.9 K 2024-12-09T17:22:18,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ff080486f786439399e9b1fcef8dbe70 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70 2024-12-09T17:22:18,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70, entries=150, sequenceid=211, filesize=11.9 K 2024-12-09T17:22:18,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,827 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/aea0611db49e4a83b4c1e400bdea74a1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1 2024-12-09T17:22:18,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:18,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1, entries=150, sequenceid=211, filesize=11.9 K 2024-12-09T17:22:18,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1245ms, sequenceid=211, compaction requested=true 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:18,832 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:18,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:18,832 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:18,833 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:18,833 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:18,833 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:18,833 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:18,833 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:18,833 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,833 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c22fb3018cdd4ffc9a21e838b8f38c63, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.0 K 2024-12-09T17:22:18,833 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0935f5715e3f4a4f98afe5dd89751b4e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.0 K 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0935f5715e3f4a4f98afe5dd89751b4e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c22fb3018cdd4ffc9a21e838b8f38c63, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b31374953ba643c49c09a98462c629df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733764935360 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd8c87088ed34d6ea8c3802f39623bec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733764935360 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ff080486f786439399e9b1fcef8dbe70, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733764936475 2024-12-09T17:22:18,834 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63e2556201df469ea100c98043f7db40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733764936475 
2024-12-09T17:22:18,841 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:18,842 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/8c5b0ddd78e744fb9b34836fed041cb8 is 50, key is test_row_0/B:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:18,844 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#253 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:18,844 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/84a5689ce1fa471eab49a3399fd3fb6b is 50, key is test_row_0/A:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:18,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742125_1301 (size=12663) 2024-12-09T17:22:18,866 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/8c5b0ddd78e744fb9b34836fed041cb8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/8c5b0ddd78e744fb9b34836fed041cb8 2024-12-09T17:22:18,871 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 8c5b0ddd78e744fb9b34836fed041cb8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:18,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:18,871 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764938832; duration=0sec 2024-12-09T17:22:18,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:18,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:18,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:18,872 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:18,872 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:18,872 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:18,872 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aee9f7e8ea67490c958df30b6d7139c8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.0 K 2024-12-09T17:22:18,873 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting aee9f7e8ea67490c958df30b6d7139c8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733764935345 2024-12-09T17:22:18,873 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 68df5d6307ab4aadb1f55dc3bdb284f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1733764935360 2024-12-09T17:22:18,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742126_1302 (size=12663) 2024-12-09T17:22:18,874 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting aea0611db49e4a83b4c1e400bdea74a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=211, earliestPutTs=1733764936475 2024-12-09T17:22:18,879 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#254 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:18,880 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/3f22a4af317d4898ba6fefb62c564925 is 50, key is test_row_0/C:col10/1733764936475/Put/seqid=0 2024-12-09T17:22:18,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742127_1303 (size=12663) 2024-12-09T17:22:18,889 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/3f22a4af317d4898ba6fefb62c564925 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/3f22a4af317d4898ba6fefb62c564925 2024-12-09T17:22:18,893 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 3f22a4af317d4898ba6fefb62c564925(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:18,893 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:18,893 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764938832; duration=0sec 2024-12-09T17:22:18,893 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:18,893 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:18,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:18,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-09T17:22:18,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:18,980 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-09T17:22:18,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:18,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:18,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:18,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:18,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/293bfb0bc6f04b3fb250709170e960aa is 50, key is test_row_0/A:col10/1733764937599/Put/seqid=0 2024-12-09T17:22:19,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742128_1304 (size=12151) 2024-12-09T17:22:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:19,277 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/84a5689ce1fa471eab49a3399fd3fb6b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/84a5689ce1fa471eab49a3399fd3fb6b 2024-12-09T17:22:19,281 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 84a5689ce1fa471eab49a3399fd3fb6b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:19,281 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:19,281 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764938832; duration=0sec 2024-12-09T17:22:19,281 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:19,281 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:19,441 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/293bfb0bc6f04b3fb250709170e960aa 2024-12-09T17:22:19,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/e8f6fccf2dd045dfa6b66eef50dd255f is 50, key is test_row_0/B:col10/1733764937599/Put/seqid=0 2024-12-09T17:22:19,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742129_1305 (size=12151) 2024-12-09T17:22:19,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:19,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:19,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764999514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764999616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733764999717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733764999719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733764999724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733764999726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:19,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733764999819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:19,853 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/e8f6fccf2dd045dfa6b66eef50dd255f 2024-12-09T17:22:19,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/64f997520fd242528ca5c05ead6596fc is 50, key is test_row_0/C:col10/1733764937599/Put/seqid=0 2024-12-09T17:22:19,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742130_1306 (size=12151) 2024-12-09T17:22:20,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:20,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765000120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:20,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:20,262 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/64f997520fd242528ca5c05ead6596fc 2024-12-09T17:22:20,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/293bfb0bc6f04b3fb250709170e960aa as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa 2024-12-09T17:22:20,269 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa, entries=150, sequenceid=236, filesize=11.9 K 2024-12-09T17:22:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/e8f6fccf2dd045dfa6b66eef50dd255f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f 2024-12-09T17:22:20,273 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f, entries=150, sequenceid=236, filesize=11.9 K 2024-12-09T17:22:20,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/64f997520fd242528ca5c05ead6596fc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc 2024-12-09T17:22:20,277 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc, entries=150, sequenceid=236, filesize=11.9 K 2024-12-09T17:22:20,277 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1297ms, sequenceid=236, compaction requested=false 2024-12-09T17:22:20,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-09T17:22:20,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-09T17:22:20,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-09T17:22:20,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2130 sec 2024-12-09T17:22:20,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 2.2150 sec 2024-12-09T17:22:20,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:20,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:22:20,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:20,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:20,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:20,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:20,627 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:20,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:20,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/38da993775ae418bb7d574042283466c is 50, key is test_row_0/A:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:20,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742131_1307 (size=9757) 2024-12-09T17:22:20,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:20,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765000660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:20,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:20,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765000762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:20,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765000965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/38da993775ae418bb7d574042283466c 2024-12-09T17:22:21,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/93fe0e9632e649e3a4cc89210d26a737 is 50, key is test_row_0/B:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:21,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742132_1308 (size=9757) 2024-12-09T17:22:21,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765001267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/93fe0e9632e649e3a4cc89210d26a737 2024-12-09T17:22:21,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/222bb496bc1c41688480de78ba967dd0 is 50, key is test_row_0/C:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:21,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742133_1309 (size=9757) 2024-12-09T17:22:21,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733765001731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733765001732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,733 DEBUG [Thread-1176 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4130 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:21,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733765001732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,735 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:21,736 DEBUG [Thread-1172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:21,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733765001743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,744 DEBUG [Thread-1178 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:21,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:21,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765001769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:21,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/222bb496bc1c41688480de78ba967dd0 2024-12-09T17:22:21,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/38da993775ae418bb7d574042283466c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c 2024-12-09T17:22:21,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c, entries=100, sequenceid=251, filesize=9.5 K 2024-12-09T17:22:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/93fe0e9632e649e3a4cc89210d26a737 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737 2024-12-09T17:22:21,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737, entries=100, sequenceid=251, filesize=9.5 K 2024-12-09T17:22:21,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/222bb496bc1c41688480de78ba967dd0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0 2024-12-09T17:22:21,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0, entries=100, sequenceid=251, filesize=9.5 K 2024-12-09T17:22:21,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1244ms, sequenceid=251, compaction requested=true 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:21,870 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:21,870 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:21,871 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:21,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:21,871 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:21,871 DEBUG 
[RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:21,871 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:21,871 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:21,871 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/84a5689ce1fa471eab49a3399fd3fb6b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=33.8 K 2024-12-09T17:22:21,871 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/8c5b0ddd78e744fb9b34836fed041cb8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=33.8 K 2024-12-09T17:22:21,871 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c5b0ddd78e744fb9b34836fed041cb8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733764936475 2024-12-09T17:22:21,871 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84a5689ce1fa471eab49a3399fd3fb6b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733764936475 2024-12-09T17:22:21,872 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 293bfb0bc6f04b3fb250709170e960aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764937599 2024-12-09T17:22:21,872 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e8f6fccf2dd045dfa6b66eef50dd255f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764937599 2024-12-09T17:22:21,872 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
38da993775ae418bb7d574042283466c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764939511 2024-12-09T17:22:21,872 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 93fe0e9632e649e3a4cc89210d26a737, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764939511 2024-12-09T17:22:21,877 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#261 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:21,877 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#262 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:21,878 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/36f3d1a9ad93423480950a91c76ef3b2 is 50, key is test_row_0/A:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:21,878 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/29ab95a37837464d82a4cd91f9ac6fa1 is 50, key is test_row_0/B:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:21,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742135_1311 (size=12765) 2024-12-09T17:22:21,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742134_1310 (size=12765) 2024-12-09T17:22:21,886 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/36f3d1a9ad93423480950a91c76ef3b2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/36f3d1a9ad93423480950a91c76ef3b2 2024-12-09T17:22:21,889 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 36f3d1a9ad93423480950a91c76ef3b2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
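The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources once the region's memstore passes its blocking limit (512.0 K in this run, which suggests the test configures a much smaller flush size than the stock default). A minimal sketch, assuming the standard HBase property names, of how that blocking limit is normally derived; the default values shown are illustrative and are not the values this test configures:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush size and the multiplier applied on top of it.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        // Writes are rejected with RegionTooBusyException once the region's memstore
        // exceeds flushSize * multiplier, until a flush brings it back under the limit.
        System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
      }
    }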
2024-12-09T17:22:21,889 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:21,889 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764941870; duration=0sec 2024-12-09T17:22:21,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:21,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:21,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:21,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:21,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:21,890 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:21,891 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/3f22a4af317d4898ba6fefb62c564925, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=33.8 K 2024-12-09T17:22:21,891 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f22a4af317d4898ba6fefb62c564925, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733764936475 2024-12-09T17:22:21,891 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64f997520fd242528ca5c05ead6596fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764937599 2024-12-09T17:22:21,891 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 222bb496bc1c41688480de78ba967dd0, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764939511 2024-12-09T17:22:21,898 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#263 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:21,898 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/2b8d62d44ece4f6f975f7455b310b96f is 50, key is test_row_0/C:col10/1733764940625/Put/seqid=0 2024-12-09T17:22:21,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742136_1312 (size=12765) 2024-12-09T17:22:22,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-09T17:22:22,170 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-09T17:22:22,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:22,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-09T17:22:22,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-09T17:22:22,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:22,172 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:22,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:22,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-09T17:22:22,286 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/29ab95a37837464d82a4cd91f9ac6fa1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/29ab95a37837464d82a4cd91f9ac6fa1 2024-12-09T17:22:22,289 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 29ab95a37837464d82a4cd91f9ac6fa1(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
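The "Operation: FLUSH ... procId: 87 completed" and "Stored pid=89 ... FlushTableProcedure" entries above are client-requested table flushes; the master runs each as a FlushTableProcedure with per-region FlushRegionProcedure subprocedures (pid=90 below). A minimal sketch of issuing such a request through the public Admin API, as an illustration rather than the code path the test tool actually takes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the call returns
          // once the corresponding flush procedure completes, as in the
          // "Checking to see if procedure is done" polling seen above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }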
2024-12-09T17:22:22,289 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:22,289 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764941870; duration=0sec 2024-12-09T17:22:22,289 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:22,289 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:22,322 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/2b8d62d44ece4f6f975f7455b310b96f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/2b8d62d44ece4f6f975f7455b310b96f 2024-12-09T17:22:22,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:22,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-09T17:22:22,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
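The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" entries earlier in this run come from the store-file thresholds that gate minor compactions and flush blocking. A hedged sketch of reading the relevant properties, assuming the stock key names and defaults; the "3 eligible" and "16 blocking" figures above line up with those defaults, but this run's actual settings are not asserted here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected.
        int minFilesToCompact = conf.getInt("hbase.hstore.compaction.min", 3);
        // Store-file count at which further flushes for the store are delayed.
        int blockingStoreFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min=" + minFilesToCompact
            + ", blockingStoreFiles=" + blockingStoreFiles);
      }
    }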
2024-12-09T17:22:22,324 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:22:22,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:22,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:22,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:22,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/047f5aae1b974c19890f970c970caa05 is 50, key is test_row_0/A:col10/1733764940654/Put/seqid=0 2024-12-09T17:22:22,330 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 2b8d62d44ece4f6f975f7455b310b96f(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
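The RegionTooBusyException warnings that follow reach the writer threads through the client's retry machinery (the "Call exception, tries=6, retries=16" entries above). A hedged sketch of a plain write against this table with illustrative retry settings; it stands in for, and is not, the AcidGuaranteesTestTool writer itself:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Standard client retry knobs; the values here are illustrative only.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100L);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // The client retries internally before failing; the RpcRetryingCallerImpl
            // lines in the log record each attempt against the busy region.
            table.put(put);
          } catch (IOException e) {
            // A RegionTooBusyException (possibly wrapped) surfaces here only after
            // the retries are exhausted.
            System.err.println("put failed after retries: " + e);
          }
        }
      }
    }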
2024-12-09T17:22:22,330 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:22,330 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764941870; duration=0sec 2024-12-09T17:22:22,330 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:22,330 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:22,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742137_1313 (size=12301) 2024-12-09T17:22:22,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-09T17:22:22,733 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/047f5aae1b974c19890f970c970caa05 2024-12-09T17:22:22,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/9fb5a5fd5c744b08809e691fa07e71d4 is 50, key is test_row_0/B:col10/1733764940654/Put/seqid=0 2024-12-09T17:22:22,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742138_1314 (size=12301) 2024-12-09T17:22:22,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-09T17:22:22,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:22,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:22,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:22,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765002799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:22,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:22,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765002901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765003104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,143 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/9fb5a5fd5c744b08809e691fa07e71d4 2024-12-09T17:22:23,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/816c6b13d2564f06aa3d7af52f55fc8e is 50, key is test_row_0/C:col10/1733764940654/Put/seqid=0 2024-12-09T17:22:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742139_1315 (size=12301) 2024-12-09T17:22:23,159 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/816c6b13d2564f06aa3d7af52f55fc8e 2024-12-09T17:22:23,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/047f5aae1b974c19890f970c970caa05 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05 2024-12-09T17:22:23,165 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05, entries=150, sequenceid=275, filesize=12.0 K 2024-12-09T17:22:23,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/9fb5a5fd5c744b08809e691fa07e71d4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4 2024-12-09T17:22:23,168 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4, entries=150, sequenceid=275, filesize=12.0 K 2024-12-09T17:22:23,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/816c6b13d2564f06aa3d7af52f55fc8e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e 2024-12-09T17:22:23,172 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e, entries=150, sequenceid=275, filesize=12.0 K 2024-12-09T17:22:23,172 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 848ms, sequenceid=275, compaction requested=false 2024-12-09T17:22:23,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:23,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:23,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-09T17:22:23,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-09T17:22:23,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-09T17:22:23,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0010 sec 2024-12-09T17:22:23,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.0040 sec 2024-12-09T17:22:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-09T17:22:23,275 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-09T17:22:23,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:23,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-09T17:22:23,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:23,277 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:23,277 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:23,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:23,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:23,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:23,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, 
store=B 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:23,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/990c652d046a48b48ed4c2318b7858d9 is 50, key is test_row_0/A:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742140_1316 (size=14741) 2024-12-09T17:22:23,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/990c652d046a48b48ed4c2318b7858d9 2024-12-09T17:22:23,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/5fe843551345449094bb4843a581375c is 50, key is test_row_0/B:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742141_1317 (size=12301) 2024-12-09T17:22:23,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/5fe843551345449094bb4843a581375c 2024-12-09T17:22:23,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-09T17:22:23,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:23,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:23,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5e20a73cc0c6427bbc0e360f31b58adf is 50, key is test_row_0/C:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742142_1318 (size=12301) 2024-12-09T17:22:23,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:23,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765003451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:23,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765003553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:23,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-09T17:22:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:23,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:23,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-09T17:22:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:23,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765003756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,845 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5e20a73cc0c6427bbc0e360f31b58adf 2024-12-09T17:22:23,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/990c652d046a48b48ed4c2318b7858d9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9 2024-12-09T17:22:23,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9, entries=200, sequenceid=291, filesize=14.4 K 2024-12-09T17:22:23,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/5fe843551345449094bb4843a581375c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c 2024-12-09T17:22:23,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c, entries=150, sequenceid=291, filesize=12.0 K 2024-12-09T17:22:23,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5e20a73cc0c6427bbc0e360f31b58adf as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf 2024-12-09T17:22:23,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf, entries=150, sequenceid=291, filesize=12.0 K 2024-12-09T17:22:23,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 451ms, sequenceid=291, compaction requested=true 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:23,860 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:23,860 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:23,861 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:23,861 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/29ab95a37837464d82a4cd91f9ac6fa1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.5 K 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39807 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:23,861 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 29ab95a37837464d82a4cd91f9ac6fa1, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764937602 2024-12-09T17:22:23,861 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/36f3d1a9ad93423480950a91c76ef3b2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=38.9 K 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fb5a5fd5c744b08809e691fa07e71d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733764940654 2024-12-09T17:22:23,861 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36f3d1a9ad93423480950a91c76ef3b2, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764937602 2024-12-09T17:22:23,862 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fe843551345449094bb4843a581375c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:23,862 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 047f5aae1b974c19890f970c970caa05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733764940654 2024-12-09T17:22:23,862 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 990c652d046a48b48ed4c2318b7858d9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:23,867 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:23,867 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:23,868 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/03551a1ba2204eecadad88244fe656e6 is 50, key is test_row_0/A:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,868 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/4dbc2851039c4f808cef4973f384d707 is 50, key is test_row_0/B:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:23,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742144_1320 (size=13017) 2024-12-09T17:22:23,886 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/03551a1ba2204eecadad88244fe656e6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/03551a1ba2204eecadad88244fe656e6 2024-12-09T17:22:23,886 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:23,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-09T17:22:23,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:23,887 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:23,893 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 03551a1ba2204eecadad88244fe656e6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:23,893 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:23,893 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764943860; duration=0sec 2024-12-09T17:22:23,893 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:23,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:23,894 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:23,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:23,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:23,895 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:23,896 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/2b8d62d44ece4f6f975f7455b310b96f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.5 K 2024-12-09T17:22:23,896 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b8d62d44ece4f6f975f7455b310b96f, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733764937602 2024-12-09T17:22:23,898 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 816c6b13d2564f06aa3d7af52f55fc8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733764940654 2024-12-09T17:22:23,898 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e20a73cc0c6427bbc0e360f31b58adf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:23,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/f96dcf72382241df831fa6a907b4d84f is 50, key is test_row_0/A:col10/1733764943440/Put/seqid=0 2024-12-09T17:22:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742143_1319 (size=13017) 2024-12-09T17:22:23,919 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#273 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:23,920 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/e035a3c0896c4631a2d3602342c1f4bd is 50, key is test_row_0/C:col10/1733764942798/Put/seqid=0 2024-12-09T17:22:23,926 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/4dbc2851039c4f808cef4973f384d707 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4dbc2851039c4f808cef4973f384d707 2024-12-09T17:22:23,932 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 4dbc2851039c4f808cef4973f384d707(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
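[Editor's note] The compaction-selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy line) reflect the usual store-file thresholds: a minor compaction becomes eligible once a store accumulates the configured minimum number of files, and flushes start blocking when the store-file count reaches the blocking limit. The sketch below shows the two settings involved, assuming the stock configuration keys; the values 3 and 16 match the counts printed in this log but are illustrative, not read from the test's actual site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum store files before a minor compaction is considered
    // (the policy above selected 3 eligible files).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count at which further flushes are blocked
    // ("16 blocking" in the selection log line).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("min files for minor compaction: "
        + conf.getInt("hbase.hstore.compactionThreshold", -1));
    System.out.println("blocking store files: "
        + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}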
2024-12-09T17:22:23,932 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:23,933 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764943860; duration=0sec 2024-12-09T17:22:23,933 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:23,933 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:23,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742145_1321 (size=12301) 2024-12-09T17:22:23,949 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/f96dcf72382241df831fa6a907b4d84f 2024-12-09T17:22:23,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ab1b6d0744dc46fb8440a73a6f221681 is 50, key is test_row_0/B:col10/1733764943440/Put/seqid=0 2024-12-09T17:22:23,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742146_1322 (size=13017) 2024-12-09T17:22:23,968 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/e035a3c0896c4631a2d3602342c1f4bd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/e035a3c0896c4631a2d3602342c1f4bd 2024-12-09T17:22:23,975 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into e035a3c0896c4631a2d3602342c1f4bd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:23,975 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:23,975 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764943860; duration=0sec 2024-12-09T17:22:23,975 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:23,975 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:23,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742147_1323 (size=12301) 2024-12-09T17:22:24,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:24,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:24,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:24,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765004086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:24,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:24,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765004189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:24,378 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ab1b6d0744dc46fb8440a73a6f221681 2024-12-09T17:22:24,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:24,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/dcea4713a5d646ecb1ccc3a0f8f7b049 is 50, key is test_row_0/C:col10/1733764943440/Put/seqid=0 2024-12-09T17:22:24,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742148_1324 (size=12301) 2024-12-09T17:22:24,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:24,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765004391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:24,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765004693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:24,787 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/dcea4713a5d646ecb1ccc3a0f8f7b049 2024-12-09T17:22:24,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/f96dcf72382241df831fa6a907b4d84f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f 2024-12-09T17:22:24,794 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f, entries=150, sequenceid=312, filesize=12.0 K 2024-12-09T17:22:24,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/ab1b6d0744dc46fb8440a73a6f221681 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681 2024-12-09T17:22:24,799 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681, entries=150, sequenceid=312, filesize=12.0 K 2024-12-09T17:22:24,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/dcea4713a5d646ecb1ccc3a0f8f7b049 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049 2024-12-09T17:22:24,802 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049, entries=150, sequenceid=312, filesize=12.0 K 2024-12-09T17:22:24,802 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 916ms, sequenceid=312, compaction requested=false 2024-12-09T17:22:24,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:24,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:24,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-09T17:22:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-09T17:22:24,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-09T17:22:24,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5260 sec 2024-12-09T17:22:24,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.5280 sec 2024-12-09T17:22:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:25,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:25,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:25,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/727db08797844b4db3c04a0c90e5db72 is 50, key is test_row_0/A:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:25,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742149_1325 (size=14741) 2024-12-09T17:22:25,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765005245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765005347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-09T17:22:25,380 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-09T17:22:25,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-12-09T17:22:25,382 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:25,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:25,383 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:25,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:25,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:25,535 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
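[Editor's note] The lines above show the client-driven flush path end to end: procId 91 completes, the jenkins client immediately requests another flush of TestAcidGuarantees, and the master stores FlushTableProcedure pid=93 with a FlushRegionProcedure subprocedure (pid=94) that is dispatched to region server 80c69eb3c456,42927. A minimal sketch of issuing such a table flush from client code with the standard Admin API follows; it is illustrative and not the AcidGuaranteesTestTool's own invocation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; the master runs a flush procedure
      // and fans out per-region subprocedures to the serving region servers.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}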
2024-12-09T17:22:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765005550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/727db08797844b4db3c04a0c90e5db72 2024-12-09T17:22:25,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1c0dc6018c0343b78fb766be6b187f2e is 50, key is test_row_0/B:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:25,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742150_1326 (size=12301) 2024-12-09T17:22:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:25,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:25,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:25,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
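[Editor's note] The recurring RegionTooBusyException warnings ("Over memstore limit=512.0 K") mean puts are being rejected in HRegion.checkResources because the region's memstore has grown past its blocking size, which is the per-region flush size multiplied by the block multiplier; writes resume once the in-flight flush drains the memstore. The sketch below shows the two settings involved, assuming the stock configuration keys; the 128 KB flush size is a hypothetical value chosen only so the product matches the 512 K limit seen in this log, not a value read from the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold (hypothetical small value for a test run).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Updates are blocked once the memstore reaches flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes (512.0 K in this log)");
  }
}

On the client side these rejections surface as retriable exceptions, which is why the RpcRetryingCallerImpl entries later in the log report growing try counts rather than an immediate failure.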
2024-12-09T17:22:25,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733765005743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,745 DEBUG [Thread-1170 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:25,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39142 deadline: 1733765005765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,767 DEBUG [Thread-1172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:25,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39206 deadline: 1733765005772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,774 DEBUG [Thread-1176 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:25,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39122 deadline: 1733765005781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,782 DEBUG [Thread-1178 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:25,839 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:25,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:25,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765005852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:25,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:25,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:25,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:25,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:25,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1c0dc6018c0343b78fb766be6b187f2e 2024-12-09T17:22:26,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6805dee87ec54db984f6cd73ad94bf19 is 50, key is test_row_0/C:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:26,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742151_1327 (size=12301) 2024-12-09T17:22:26,144 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:26,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:26,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
as already flushing 2024-12-09T17:22:26,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,145 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,296 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:26,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:26,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765006357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:26,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6805dee87ec54db984f6cd73ad94bf19 2024-12-09T17:22:26,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/727db08797844b4db3c04a0c90e5db72 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72 2024-12-09T17:22:26,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72, entries=200, sequenceid=331, filesize=14.4 K 2024-12-09T17:22:26,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/1c0dc6018c0343b78fb766be6b187f2e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e 2024-12-09T17:22:26,449 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:26,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
as already flushing 2024-12-09T17:22:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:26,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e, entries=150, sequenceid=331, filesize=12.0 K 2024-12-09T17:22:26,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:26,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
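[editor's note] The entries above show the region server rejecting client Mutate calls with RegionTooBusyException once the memstore passes its 512.0 K blocking limit, while the flush that would drain it is still in progress. As a rough illustration only (not part of the test code), a writer hitting this condition can back off and retry; the table name, family/qualifier and retry numbers below are assumptions, not values taken from this log.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

final class BackoffPut {
  // Illustrative sketch: retry a single Put with exponential backoff while the region
  // reports it is over its memstore blocking limit.
  static void putWithBackoff(Connection conn) throws IOException, InterruptedException {
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;                                   // initial pause before retrying
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;                                             // write accepted
        } catch (IOException e) {
          boolean tooBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!tooBusy) {
            throw e;                                          // unrelated failure, surface it
          }
          Thread.sleep(backoffMs);                            // let the pending flush drain the memstore
          backoffMs *= 2;
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }
}

(The stock HBase client already retries this exception internally; the loop above only sketches the behaviour visible in the callId 223/238/240 rejections.)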
2024-12-09T17:22:26,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/6805dee87ec54db984f6cd73ad94bf19 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19 2024-12-09T17:22:26,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19, entries=150, sequenceid=331, filesize=12.0 K 2024-12-09T17:22:26,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1253ms, sequenceid=331, compaction requested=true 2024-12-09T17:22:26,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:26,455 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:26,455 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:26,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:26,456 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:26,456 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:26,456 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 
ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:26,456 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:26,456 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,456 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:26,456 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4dbc2851039c4f808cef4973f384d707, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.7 K 2024-12-09T17:22:26,456 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/03551a1ba2204eecadad88244fe656e6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=39.1 K 2024-12-09T17:22:26,457 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03551a1ba2204eecadad88244fe656e6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:26,457 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dbc2851039c4f808cef4973f384d707, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:26,457 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ab1b6d0744dc46fb8440a73a6f221681, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733764943440 2024-12-09T17:22:26,457 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f96dcf72382241df831fa6a907b4d84f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733764943440 2024-12-09T17:22:26,457 DEBUG 
[RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 727db08797844b4db3c04a0c90e5db72, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944073 2024-12-09T17:22:26,457 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c0dc6018c0343b78fb766be6b187f2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944082 2024-12-09T17:22:26,465 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:26,465 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/e34cb104c35c4dcaa14ad0f6104ad88a is 50, key is test_row_0/A:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:26,467 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#280 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:26,468 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/30b7d59b0ad3428cb8cb5edbadb8a652 is 50, key is test_row_0/B:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:26,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742152_1328 (size=13119) 2024-12-09T17:22:26,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742153_1329 (size=13119) 2024-12-09T17:22:26,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:26,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:26,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
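[editor's note] At 17:22:26,449 and again at 17:22:26,602 the master re-dispatches the same FlushRegionCallable (pid=94) until the region is no longer mid-flush, and the 3/3 column-family flush then starts in the next entry. Judging by the FlushTableProcedure (pid=93) and the "Checking to see if procedure is done pid=93" polling, this sequence is driven by an admin-level flush request; a minimal client-side sketch of such a request follows, assuming an already-open Connection.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import java.io.IOException;

final class FlushTable {
  // Illustrative sketch: ask the master to flush every region of the table. The master runs
  // the flush as a table-level procedure with one per-region sub-procedure (pid=93/pid=94 above),
  // re-dispatching the region callable while the region is already flushing.
  static void flush(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}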
2024-12-09T17:22:26,602 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:26,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ca06adb2e2f8445ebaf32dce6dac813e is 50, key is test_row_0/A:col10/1733764945224/Put/seqid=0 2024-12-09T17:22:26,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742154_1330 (size=12301) 2024-12-09T17:22:26,889 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/30b7d59b0ad3428cb8cb5edbadb8a652 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/30b7d59b0ad3428cb8cb5edbadb8a652 2024-12-09T17:22:26,889 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/e34cb104c35c4dcaa14ad0f6104ad88a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/e34cb104c35c4dcaa14ad0f6104ad88a 2024-12-09T17:22:26,895 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into e34cb104c35c4dcaa14ad0f6104ad88a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:26,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:26,896 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764946454; duration=0sec 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:26,896 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 30b7d59b0ad3428cb8cb5edbadb8a652(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:26,896 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764946455; duration=0sec 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:26,896 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:26,897 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:26,897 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:26,897 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:26,897 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/e035a3c0896c4631a2d3602342c1f4bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.7 K 2024-12-09T17:22:26,897 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e035a3c0896c4631a2d3602342c1f4bd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733764942787 2024-12-09T17:22:26,897 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcea4713a5d646ecb1ccc3a0f8f7b049, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733764943440 2024-12-09T17:22:26,897 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6805dee87ec54db984f6cd73ad94bf19, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944082 2024-12-09T17:22:26,903 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:26,903 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/92204b24211e4455a7135a905530f2fb is 50, key is test_row_0/C:col10/1733764945200/Put/seqid=0 2024-12-09T17:22:26,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742155_1331 (size=13119) 2024-12-09T17:22:27,021 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ca06adb2e2f8445ebaf32dce6dac813e 2024-12-09T17:22:27,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d2231cf564394c128d0c47de5998d3fe is 50, key is test_row_0/B:col10/1733764945224/Put/seqid=0 2024-12-09T17:22:27,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742156_1332 (size=12301) 2024-12-09T17:22:27,317 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/92204b24211e4455a7135a905530f2fb as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/92204b24211e4455a7135a905530f2fb 2024-12-09T17:22:27,321 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 92204b24211e4455a7135a905530f2fb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
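[editor's note] The recurring figures in these entries map onto a handful of sizing knobs: each store becomes a minor-compaction candidate once three flushed files accumulate, "16 blocking" is the default blocking store-file count, and the "Over memstore limit=512.0 K" warnings are the memstore flush size times the block multiplier (so the test is evidently running with a much smaller flush size than the 128 MB default). The sketch below lists the relevant configuration keys with their stock defaults, not the values this test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class StoreTuning {
  // Illustrative sketch of the settings behind the numbers in this log; values shown are defaults.
  static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore is flushed at this size; writes block once it reaches flush.size * block.multiplier
    // (that blocking limit is what the "Over memstore limit" warnings above are hitting).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // A store is eligible for minor compaction at 3 files; at 16 files ("16 blocking" in the
    // selection messages) further flushes are delayed until compaction catches up.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}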
2024-12-09T17:22:27,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:27,321 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764946455; duration=0sec 2024-12-09T17:22:27,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:27,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:27,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:27,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. as already flushing 2024-12-09T17:22:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:27,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765007379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:27,449 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d2231cf564394c128d0c47de5998d3fe 2024-12-09T17:22:27,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/229ce7392e464659a23c6c292987f22b is 50, key is test_row_0/C:col10/1733764945224/Put/seqid=0 2024-12-09T17:22:27,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742157_1333 (size=12301) 2024-12-09T17:22:27,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765007481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:27,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765007685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:27,859 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/229ce7392e464659a23c6c292987f22b 2024-12-09T17:22:27,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ca06adb2e2f8445ebaf32dce6dac813e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e 2024-12-09T17:22:27,867 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e, entries=150, sequenceid=351, filesize=12.0 K 2024-12-09T17:22:27,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/d2231cf564394c128d0c47de5998d3fe as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe 2024-12-09T17:22:27,872 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe, entries=150, sequenceid=351, filesize=12.0 K 2024-12-09T17:22:27,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/229ce7392e464659a23c6c292987f22b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b 2024-12-09T17:22:27,876 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b, entries=150, sequenceid=351, filesize=12.0 K 2024-12-09T17:22:27,877 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1275ms, sequenceid=351, compaction requested=false 2024-12-09T17:22:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-12-09T17:22:27,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-12-09T17:22:27,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-09T17:22:27,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4950 sec 2024-12-09T17:22:27,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 2.4980 sec 2024-12-09T17:22:27,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:27,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:27,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:27,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/fc4d68c1016a4d03bdbf639402297ea3 is 50, key is test_row_0/A:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:27,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742158_1334 (size=14741) 2024-12-09T17:22:27,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/fc4d68c1016a4d03bdbf639402297ea3 2024-12-09T17:22:28,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d is 50, key is test_row_0/B:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:28,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742159_1335 (size=12301) 2024-12-09T17:22:28,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d 2024-12-09T17:22:28,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 is 50, key is test_row_0/C:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742160_1336 (size=12301) 2024-12-09T17:22:28,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 2024-12-09T17:22:28,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/fc4d68c1016a4d03bdbf639402297ea3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3 2024-12-09T17:22:28,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3, entries=200, sequenceid=371, filesize=14.4 K 2024-12-09T17:22:28,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d 2024-12-09T17:22:28,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:28,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765008029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:28,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d, entries=150, sequenceid=371, filesize=12.0 K 2024-12-09T17:22:28,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 2024-12-09T17:22:28,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2, entries=150, sequenceid=371, filesize=12.0 K 2024-12-09T17:22:28,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 47ms, sequenceid=371, compaction requested=true 2024-12-09T17:22:28,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:28,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:28,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:28,036 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:28,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:28,037 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:28,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:28,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:28,037 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:28,037 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:28,038 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/e34cb104c35c4dcaa14ad0f6104ad88a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=39.2 K 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e34cb104c35c4dcaa14ad0f6104ad88a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944082 2024-12-09T17:22:28,038 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:28,038 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/30b7d59b0ad3428cb8cb5edbadb8a652, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.8 K 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca06adb2e2f8445ebaf32dce6dac813e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733764945224 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 30b7d59b0ad3428cb8cb5edbadb8a652, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944082 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc4d68c1016a4d03bdbf639402297ea3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:28,038 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d2231cf564394c128d0c47de5998d3fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733764945224 2024-12-09T17:22:28,039 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 45d6aee1dfbf4ff3ab7ddb3b5ce7115d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:28,046 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:28,046 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/df67925a580941d38056376a638920e7 is 50, key is test_row_0/B:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:28,049 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:28,049 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/180e00dc427a45b19f4030a6688e49b9 is 50, key is test_row_0/A:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:28,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742161_1337 (size=13221) 2024-12-09T17:22:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742162_1338 (size=13221) 2024-12-09T17:22:28,070 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/df67925a580941d38056376a638920e7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/df67925a580941d38056376a638920e7 2024-12-09T17:22:28,073 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into df67925a580941d38056376a638920e7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:28,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:28,074 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764948036; duration=0sec 2024-12-09T17:22:28,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:28,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:28,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:28,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:28,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:28,075 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:28,075 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/92204b24211e4455a7135a905530f2fb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.8 K 2024-12-09T17:22:28,075 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 92204b24211e4455a7135a905530f2fb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733764944082 2024-12-09T17:22:28,076 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 229ce7392e464659a23c6c292987f22b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733764945224 2024-12-09T17:22:28,076 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d7efeed3b2ed4deb9d5bfefd4d3a05b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:28,082 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#290 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:28,082 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/b2d01a57148c415199174f011d93775e is 50, key is test_row_0/C:col10/1733764947375/Put/seqid=0 2024-12-09T17:22:28,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742163_1339 (size=13221) 2024-12-09T17:22:28,099 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/b2d01a57148c415199174f011d93775e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/b2d01a57148c415199174f011d93775e 2024-12-09T17:22:28,107 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into b2d01a57148c415199174f011d93775e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
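The entries above record back-to-back minor compactions of the B and C stores of region ba40c75e49d5ed83d5ce6b14bf62ff79, selected internally by the region server. For reference only (not part of the test output): a client can also request a compaction of this table and poll its state through the HBase Admin API. The snippet below is a minimal sketch; the table name is taken from the log, while the Configuration and cluster connection setup are assumed to match the mini-cluster used here.

// --- hypothetical client-side sketch; not part of the log above ---
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.compact(table);                                 // ask for a compaction of all stores
      CompactionState state = admin.getCompactionState(table);
      System.out.println("Compaction state: " + state);     // e.g. MINOR, MAJOR, NONE
    }
  }
}
// --- end sketch ---
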
2024-12-09T17:22:28,107 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:28,107 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764948037; duration=0sec 2024-12-09T17:22:28,107 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:28,107 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:28,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:28,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:28,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:28,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/d1d289b38a11484582afa1d1edb1ec8d is 50, key is test_row_0/A:col10/1733764948132/Put/seqid=0 2024-12-09T17:22:28,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742164_1340 (size=12301) 2024-12-09T17:22:28,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:28,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765008165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:28,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:28,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765008267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:28,469 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/180e00dc427a45b19f4030a6688e49b9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/180e00dc427a45b19f4030a6688e49b9 2024-12-09T17:22:28,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:28,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765008471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:28,473 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into 180e00dc427a45b19f4030a6688e49b9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:28,473 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:28,473 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764948036; duration=0sec 2024-12-09T17:22:28,473 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:28,473 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:28,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/d1d289b38a11484582afa1d1edb1ec8d 2024-12-09T17:22:28,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/4a751c287eaf4e499ddbb187f419150d is 50, key is test_row_0/B:col10/1733764948132/Put/seqid=0 2024-12-09T17:22:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742165_1341 (size=12301) 2024-12-09T17:22:28,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:28,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765008773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:28,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/4a751c287eaf4e499ddbb187f419150d 2024-12-09T17:22:28,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/9f852b87ff124cada5d3d0529ce47766 is 50, key is test_row_0/C:col10/1733764948132/Put/seqid=0 2024-12-09T17:22:28,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742166_1342 (size=12301) 2024-12-09T17:22:29,036 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e5c7476 to 127.0.0.1:54326 2024-12-09T17:22:29,036 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:29,037 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6119e7 to 127.0.0.1:54326 2024-12-09T17:22:29,037 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:29,037 DEBUG [Thread-1187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df84068 to 127.0.0.1:54326 2024-12-09T17:22:29,037 DEBUG [Thread-1187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:29,037 DEBUG [Thread-1189 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x644774bd to 127.0.0.1:54326 2024-12-09T17:22:29,037 DEBUG [Thread-1189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:29,039 DEBUG [Thread-1183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7507573f to 127.0.0.1:54326 2024-12-09T17:22:29,039 DEBUG [Thread-1183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:29,282 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39196 deadline: 1733765009282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:29,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/9f852b87ff124cada5d3d0529ce47766 2024-12-09T17:22:29,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/d1d289b38a11484582afa1d1edb1ec8d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d 2024-12-09T17:22:29,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d, entries=150, sequenceid=393, filesize=12.0 K 2024-12-09T17:22:29,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/4a751c287eaf4e499ddbb187f419150d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d 2024-12-09T17:22:29,395 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d, entries=150, sequenceid=393, filesize=12.0 K 2024-12-09T17:22:29,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/9f852b87ff124cada5d3d0529ce47766 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766 2024-12-09T17:22:29,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766, entries=150, sequenceid=393, filesize=12.0 K 2024-12-09T17:22:29,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1266ms, sequenceid=393, compaction requested=false 2024-12-09T17:22:29,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:29,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-09T17:22:29,489 INFO [Thread-1180 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-09T17:22:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:30,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-09T17:22:30,317 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45426917 to 127.0.0.1:54326 2024-12-09T17:22:30,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:30,318 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:30,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:30,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:30,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:30,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:30,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:30,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ce927a09c6524dc0b4584d26c05b059b is 50, key is test_row_0/A:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:30,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742167_1343 (size=12301) 2024-12-09T17:22:30,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ce927a09c6524dc0b4584d26c05b059b 2024-12-09T17:22:30,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/53e21074d72a42859b7824c73b656df4 is 50, key is test_row_0/B:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742168_1344 (size=12301) 2024-12-09T17:22:31,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/53e21074d72a42859b7824c73b656df4 2024-12-09T17:22:31,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f847af1f41034610b7cbafe3039bbd98 is 50, key is test_row_0/C:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:31,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742169_1345 (size=12301) 2024-12-09T17:22:31,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f847af1f41034610b7cbafe3039bbd98 2024-12-09T17:22:31,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/ce927a09c6524dc0b4584d26c05b059b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b 2024-12-09T17:22:31,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b, entries=150, sequenceid=411, filesize=12.0 K 2024-12-09T17:22:31,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/53e21074d72a42859b7824c73b656df4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4 2024-12-09T17:22:31,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4, entries=150, sequenceid=411, filesize=12.0 K 2024-12-09T17:22:31,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/f847af1f41034610b7cbafe3039bbd98 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98 2024-12-09T17:22:31,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98, entries=150, sequenceid=411, filesize=12.0 K 2024-12-09T17:22:31,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=0 B/0 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1275ms, sequenceid=411, compaction requested=true 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:31,592 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:31,592 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ba40c75e49d5ed83d5ce6b14bf62ff79:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:31,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/B is initiating minor compaction (all files) 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/A is initiating minor compaction (all files) 2024-12-09T17:22:31,594 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/B in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:31,594 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/A in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:31,594 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/df67925a580941d38056376a638920e7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.9 K 2024-12-09T17:22:31,594 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/180e00dc427a45b19f4030a6688e49b9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.9 K 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting df67925a580941d38056376a638920e7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:31,594 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
180e00dc427a45b19f4030a6688e49b9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:31,595 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a751c287eaf4e499ddbb187f419150d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733764948025 2024-12-09T17:22:31,595 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1d289b38a11484582afa1d1edb1ec8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733764948025 2024-12-09T17:22:31,595 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 53e21074d72a42859b7824c73b656df4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733764948158 2024-12-09T17:22:31,595 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce927a09c6524dc0b4584d26c05b059b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733764948158 2024-12-09T17:22:31,603 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#A#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:31,603 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#B#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:31,604 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/c10b9768d9de4473a866404e92339a65 is 50, key is test_row_0/A:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:31,604 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/2ab0727134524677a06b574226848238 is 50, key is test_row_0/B:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:31,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742170_1346 (size=13323) 2024-12-09T17:22:31,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742171_1347 (size=13323) 2024-12-09T17:22:32,017 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/c10b9768d9de4473a866404e92339a65 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c10b9768d9de4473a866404e92339a65 2024-12-09T17:22:32,017 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/2ab0727134524677a06b574226848238 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/2ab0727134524677a06b574226848238 2024-12-09T17:22:32,021 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/A of ba40c75e49d5ed83d5ce6b14bf62ff79 into c10b9768d9de4473a866404e92339a65(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:32,021 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:32,021 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/B of ba40c75e49d5ed83d5ce6b14bf62ff79 into 2ab0727134524677a06b574226848238(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
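Earlier in this run (17:22:28-17:22:29) several Mutate calls were rejected with RegionTooBusyException because the region's memstore was over its 512 K blocking limit while the flush was still in progress. The stock HBase client normally absorbs and retries such rejections internally; the sketch below only illustrates, under that caveat, how a bare Table.put caller could back off and retry on this exception. The table name, row key, and column layout are copied from the log; the connection setup, retry count, and backoff are assumptions.

// --- hypothetical client-side sketch; not part of the log above ---
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException busy) {   // memstore over its blocking limit
          if (++attempts >= 5) throw busy;        // give up after a few tries (assumed bound)
          Thread.sleep(200L * attempts);          // simple linear backoff
        }
      }
    }
  }
}
// --- end sketch ---
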
2024-12-09T17:22:32,021 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/A, priority=13, startTime=1733764951592; duration=0sec 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:32,022 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/B, priority=13, startTime=1733764951592; duration=0sec 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:A 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:32,022 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:B 2024-12-09T17:22:32,023 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:32,023 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): ba40c75e49d5ed83d5ce6b14bf62ff79/C is initiating minor compaction (all files) 2024-12-09T17:22:32,023 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ba40c75e49d5ed83d5ce6b14bf62ff79/C in TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:32,023 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/b2d01a57148c415199174f011d93775e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp, totalSize=36.9 K 2024-12-09T17:22:32,023 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2d01a57148c415199174f011d93775e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733764947367 2024-12-09T17:22:32,024 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f852b87ff124cada5d3d0529ce47766, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733764948025 2024-12-09T17:22:32,024 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f847af1f41034610b7cbafe3039bbd98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733764948158 2024-12-09T17:22:32,029 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ba40c75e49d5ed83d5ce6b14bf62ff79#C#compaction#299 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:32,030 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/9613b4cb74524634afc4e27ff027e458 is 50, key is test_row_0/C:col10/1733764948158/Put/seqid=0 2024-12-09T17:22:32,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742172_1348 (size=13323) 2024-12-09T17:22:32,444 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/9613b4cb74524634afc4e27ff027e458 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9613b4cb74524634afc4e27ff027e458 2024-12-09T17:22:32,449 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ba40c75e49d5ed83d5ce6b14bf62ff79/C of ba40c75e49d5ed83d5ce6b14bf62ff79 into 9613b4cb74524634afc4e27ff027e458(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
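The ExploringCompactionPolicy lines above show each store being picked for a minor compaction as soon as three eligible store files accumulate, with flushes blocked only at 16 store files ("3 eligible, 16 blocking"). Those two numbers correspond to standard HBase settings; the snippet below is a hedged illustration of setting them programmatically on a test Configuration. The key names are the stock ones; the values simply restate the defaults visible in the log.

// --- hypothetical configuration sketch; not part of the log above ---
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected
    // (the log above shows this threshold of 3 being reached for stores A, B and C).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count at which new flushes are blocked; "16 blocking" in the log.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println(conf.get("hbase.hstore.compactionThreshold"));
  }
}
// --- end sketch ---
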
2024-12-09T17:22:32,449 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:32,449 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79., storeName=ba40c75e49d5ed83d5ce6b14bf62ff79/C, priority=13, startTime=1733764951592; duration=0sec 2024-12-09T17:22:32,449 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:32,449 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ba40c75e49d5ed83d5ce6b14bf62ff79:C 2024-12-09T17:22:33,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T17:22:35,763 DEBUG [Thread-1170 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7792c763 to 127.0.0.1:54326 2024-12-09T17:22:35,763 DEBUG [Thread-1170 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:35,821 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0c8a18c7 to 127.0.0.1:54326 2024-12-09T17:22:35,821 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:35,836 DEBUG [Thread-1176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e7fc60d to 127.0.0.1:54326 2024-12-09T17:22:35,836 DEBUG [Thread-1176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:35,865 DEBUG [Thread-1178 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e66ea50 to 127.0.0.1:54326 2024-12-09T17:22:35,865 DEBUG [Thread-1178 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 164 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8456 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8442 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8548 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8522 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8382 2024-12-09T17:22:35,866 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-09T17:22:35,866 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T17:22:35,866 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72a7721c to 127.0.0.1:54326 2024-12-09T17:22:35,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:22:35,867 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-09T17:22:35,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-09T17:22:35,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:35,871 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764955871"}]},"ts":"1733764955871"} 2024-12-09T17:22:35,873 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-09T17:22:35,892 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-09T17:22:35,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:22:35,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, UNASSIGN}] 2024-12-09T17:22:35,895 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, UNASSIGN 2024-12-09T17:22:35,895 INFO [PEWorker-2 {}] 
assignment.RegionStateStore(202): pid=97 updating hbase:meta row=ba40c75e49d5ed83d5ce6b14bf62ff79, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:35,896 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:22:35,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:35,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:36,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:36,049 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:36,049 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:22:36,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing ba40c75e49d5ed83d5ce6b14bf62ff79, disabling compactions & flushes 2024-12-09T17:22:36,050 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:36,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 2024-12-09T17:22:36,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. after waiting 0 ms 2024-12-09T17:22:36,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
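The DisableTableProcedure entries above (pid=95 through pid=98) correspond to the test tearing the table down: the master marks TestAcidGuarantees as DISABLING, schedules a TransitRegionStateProcedure to unassign region ba40c75e49d5ed83d5ce6b14bf62ff79, and the region server closes the region after a final flush. From the client side this whole sequence is driven by a single Admin call; the sketch below shows it, with connection setup assumed, and with the deleteTable call left commented out since the log above only shows a disable.

// --- hypothetical client-side sketch; not part of the log above ---
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // drives the DisableTableProcedure seen in the log
      }
      // admin.deleteTable(table);   // typical follow-up cleanup; not performed in the log above
    }
  }
}
// --- end sketch ---
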
2024-12-09T17:22:36,050 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(2837): Flushing ba40c75e49d5ed83d5ce6b14bf62ff79 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-09T17:22:36,051 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=A 2024-12-09T17:22:36,051 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:36,051 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=B 2024-12-09T17:22:36,051 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:36,051 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ba40c75e49d5ed83d5ce6b14bf62ff79, store=C 2024-12-09T17:22:36,052 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:36,059 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/76ccaa1b634f4573af2d6b27ea27bc08 is 50, key is test_row_0/A:col10/1733764955864/Put/seqid=0 2024-12-09T17:22:36,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742173_1349 (size=9857) 2024-12-09T17:22:36,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:36,466 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/76ccaa1b634f4573af2d6b27ea27bc08 2024-12-09T17:22:36,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:36,479 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/c73c6db8c4b3441db48201dec1ad9451 is 50, key is test_row_0/B:col10/1733764955864/Put/seqid=0 2024-12-09T17:22:36,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742174_1350 (size=9857) 2024-12-09T17:22:36,884 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 
{event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/c73c6db8c4b3441db48201dec1ad9451 2024-12-09T17:22:36,892 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5a674968ace34c6d8cf05cc32c7ef154 is 50, key is test_row_0/C:col10/1733764955864/Put/seqid=0 2024-12-09T17:22:36,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742175_1351 (size=9857) 2024-12-09T17:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:37,299 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5a674968ace34c6d8cf05cc32c7ef154 2024-12-09T17:22:37,309 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/A/76ccaa1b634f4573af2d6b27ea27bc08 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/76ccaa1b634f4573af2d6b27ea27bc08 2024-12-09T17:22:37,313 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/76ccaa1b634f4573af2d6b27ea27bc08, entries=100, sequenceid=421, filesize=9.6 K 2024-12-09T17:22:37,314 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/B/c73c6db8c4b3441db48201dec1ad9451 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c73c6db8c4b3441db48201dec1ad9451 2024-12-09T17:22:37,316 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c73c6db8c4b3441db48201dec1ad9451, entries=100, sequenceid=421, filesize=9.6 K 2024-12-09T17:22:37,317 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/.tmp/C/5a674968ace34c6d8cf05cc32c7ef154 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5a674968ace34c6d8cf05cc32c7ef154 2024-12-09T17:22:37,319 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5a674968ace34c6d8cf05cc32c7ef154, entries=100, sequenceid=421, filesize=9.6 K 2024-12-09T17:22:37,320 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ba40c75e49d5ed83d5ce6b14bf62ff79 in 1270ms, sequenceid=421, compaction requested=false 2024-12-09T17:22:37,320 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/018e484a14124d0bbadb8ae2398a1830, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0067abf942c844cb9cf3ee5543f83eec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/19d4897a9b4640c485ccc6c86124a00c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0935f5715e3f4a4f98afe5dd89751b4e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/84a5689ce1fa471eab49a3399fd3fb6b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/36f3d1a9ad93423480950a91c76ef3b2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/03551a1ba2204eecadad88244fe656e6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/e34cb104c35c4dcaa14ad0f6104ad88a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/180e00dc427a45b19f4030a6688e49b9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b] to archive 2024-12-09T17:22:37,321 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
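The StoreCloser entries above and below move each compacted store file of column family A from the region's data directory to a parallel path under archive/ instead of deleting it. The following is only a hedged sketch of that rebase-and-rename pattern with the standard Hadoop FileSystem API, under hypothetical paths shaped like the ones in this log; it is not the HFileArchiver implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  // Rebase a store file path from the live data tree onto the archive tree and rename it there,
  // mirroring the "data/default/... -> archive/data/default/..." moves in the surrounding entries.
  static boolean archive(FileSystem fs, Path dataRoot, Path archiveRoot, Path storeFile) throws Exception {
    String relative = storeFile.toString().substring(dataRoot.toString().length() + 1);
    Path target = new Path(archiveRoot, relative);
    fs.mkdirs(target.getParent());        // make sure archive/<table>/<region>/<cf>/ exists
    return fs.rename(storeFile, target);  // a metadata-only move on HDFS, so archiving is cheap
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());  // would resolve to the test cluster's HDFS
    // Hypothetical stand-ins for the paths seen in this log.
    Path dataRoot = new Path("/hbase/data");
    Path archiveRoot = new Path("/hbase/archive/data");
    Path storeFile = new Path("/hbase/data/default/TestAcidGuarantees/region0001/A/hfile0001");
    System.out.println("archived: " + archive(fs, dataRoot, archiveRoot, storeFile));
  }
}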
2024-12-09T17:22:37,322 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c3d38846300d4689bef0c4bb2132138a 2024-12-09T17:22:37,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bf8e603087044d39bb386577018f0fe9 2024-12-09T17:22:37,324 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/018e484a14124d0bbadb8ae2398a1830 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/018e484a14124d0bbadb8ae2398a1830 2024-12-09T17:22:37,325 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/be63db0d02d0461493924aa62f1527e2 2024-12-09T17:22:37,325 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/4b69e42f3e2f429b911335ed6869a5d2 2024-12-09T17:22:37,326 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0067abf942c844cb9cf3ee5543f83eec to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0067abf942c844cb9cf3ee5543f83eec 2024-12-09T17:22:37,327 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/bb9ec014f5f44cf7a4f68a77454a4507 2024-12-09T17:22:37,327 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/9e8c1eb36f4f47b0aba00de02f0da806 2024-12-09T17:22:37,328 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/19d4897a9b4640c485ccc6c86124a00c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/19d4897a9b4640c485ccc6c86124a00c 2024-12-09T17:22:37,329 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/290ef2d9c9da41a5b429ea88e3fc7ea2 2024-12-09T17:22:37,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/69097445bfda4f4c899c0254b83625b8 2024-12-09T17:22:37,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/a0f6bb0620c64c42b16d00c613ed0754 2024-12-09T17:22:37,331 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0935f5715e3f4a4f98afe5dd89751b4e to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/0935f5715e3f4a4f98afe5dd89751b4e 2024-12-09T17:22:37,332 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/dd8c87088ed34d6ea8c3802f39623bec 2024-12-09T17:22:37,332 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/84a5689ce1fa471eab49a3399fd3fb6b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/84a5689ce1fa471eab49a3399fd3fb6b 2024-12-09T17:22:37,333 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/63e2556201df469ea100c98043f7db40 2024-12-09T17:22:37,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/293bfb0bc6f04b3fb250709170e960aa 2024-12-09T17:22:37,335 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/36f3d1a9ad93423480950a91c76ef3b2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/36f3d1a9ad93423480950a91c76ef3b2 2024-12-09T17:22:37,335 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/38da993775ae418bb7d574042283466c 2024-12-09T17:22:37,336 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/047f5aae1b974c19890f970c970caa05 2024-12-09T17:22:37,337 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/990c652d046a48b48ed4c2318b7858d9 2024-12-09T17:22:37,337 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/03551a1ba2204eecadad88244fe656e6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/03551a1ba2204eecadad88244fe656e6 2024-12-09T17:22:37,338 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/f96dcf72382241df831fa6a907b4d84f 2024-12-09T17:22:37,339 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/727db08797844b4db3c04a0c90e5db72 2024-12-09T17:22:37,339 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/e34cb104c35c4dcaa14ad0f6104ad88a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/e34cb104c35c4dcaa14ad0f6104ad88a 2024-12-09T17:22:37,340 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ca06adb2e2f8445ebaf32dce6dac813e 2024-12-09T17:22:37,341 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/fc4d68c1016a4d03bdbf639402297ea3 2024-12-09T17:22:37,342 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/180e00dc427a45b19f4030a6688e49b9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/180e00dc427a45b19f4030a6688e49b9 2024-12-09T17:22:37,342 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/d1d289b38a11484582afa1d1edb1ec8d 2024-12-09T17:22:37,343 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/ce927a09c6524dc0b4584d26c05b059b 2024-12-09T17:22:37,344 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ef9769cd002046bf80ec44753401c2c8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e216badc9d034f12ac26c4538b210a78, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/7f26fa13612941a0a3918b241486792d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c22fb3018cdd4ffc9a21e838b8f38c63, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/8c5b0ddd78e744fb9b34836fed041cb8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/29ab95a37837464d82a4cd91f9ac6fa1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4dbc2851039c4f808cef4973f384d707, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/30b7d59b0ad3428cb8cb5edbadb8a652, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/df67925a580941d38056376a638920e7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4] to archive 2024-12-09T17:22:37,345 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:22:37,346 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/69287283ec164c598336e58143b06e7a 2024-12-09T17:22:37,347 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1026f195b1234b9cb022b64e130cca57 2024-12-09T17:22:37,347 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ef9769cd002046bf80ec44753401c2c8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ef9769cd002046bf80ec44753401c2c8 2024-12-09T17:22:37,348 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b613e3e6bff842019dea6bb0214ad3bc 2024-12-09T17:22:37,349 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b6c4ff2d32124b78a6d77d0812e56952 2024-12-09T17:22:37,349 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e216badc9d034f12ac26c4538b210a78 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e216badc9d034f12ac26c4538b210a78 2024-12-09T17:22:37,350 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/6647eeb5df8b40ea87ccde3cc04deb4c 2024-12-09T17:22:37,350 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b1c7f55f6ef14a55ad430feb81d54df8 2024-12-09T17:22:37,351 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/7f26fa13612941a0a3918b241486792d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/7f26fa13612941a0a3918b241486792d 2024-12-09T17:22:37,352 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d9ecb68c8e2e406caa6604bc3ec6b6f3 2024-12-09T17:22:37,352 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/a3686a301dde4939bdeba5628c7ace11 2024-12-09T17:22:37,353 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c22fb3018cdd4ffc9a21e838b8f38c63 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c22fb3018cdd4ffc9a21e838b8f38c63 2024-12-09T17:22:37,354 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/759daad050c5474d8c9766f1b42bf422 2024-12-09T17:22:37,354 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/b31374953ba643c49c09a98462c629df 2024-12-09T17:22:37,355 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/8c5b0ddd78e744fb9b34836fed041cb8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/8c5b0ddd78e744fb9b34836fed041cb8 2024-12-09T17:22:37,355 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ff080486f786439399e9b1fcef8dbe70 2024-12-09T17:22:37,356 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/e8f6fccf2dd045dfa6b66eef50dd255f 2024-12-09T17:22:37,357 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/29ab95a37837464d82a4cd91f9ac6fa1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/29ab95a37837464d82a4cd91f9ac6fa1 2024-12-09T17:22:37,357 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/93fe0e9632e649e3a4cc89210d26a737 2024-12-09T17:22:37,358 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/9fb5a5fd5c744b08809e691fa07e71d4 2024-12-09T17:22:37,358 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4dbc2851039c4f808cef4973f384d707 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4dbc2851039c4f808cef4973f384d707 2024-12-09T17:22:37,359 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/5fe843551345449094bb4843a581375c 2024-12-09T17:22:37,360 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/ab1b6d0744dc46fb8440a73a6f221681 2024-12-09T17:22:37,361 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/30b7d59b0ad3428cb8cb5edbadb8a652 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/30b7d59b0ad3428cb8cb5edbadb8a652 2024-12-09T17:22:37,362 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/1c0dc6018c0343b78fb766be6b187f2e 2024-12-09T17:22:37,362 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/d2231cf564394c128d0c47de5998d3fe 2024-12-09T17:22:37,363 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/df67925a580941d38056376a638920e7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/df67925a580941d38056376a638920e7 2024-12-09T17:22:37,364 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/45d6aee1dfbf4ff3ab7ddb3b5ce7115d 2024-12-09T17:22:37,365 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/4a751c287eaf4e499ddbb187f419150d 2024-12-09T17:22:37,365 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/53e21074d72a42859b7824c73b656df4 2024-12-09T17:22:37,366 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/ef421f688ce9438f837060d7f164b55e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/97ca08739cd747e39d5634ecacf6453e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f36fe77004164a4e8ab2117c7a551b3b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aee9f7e8ea67490c958df30b6d7139c8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/3f22a4af317d4898ba6fefb62c564925, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/2b8d62d44ece4f6f975f7455b310b96f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/e035a3c0896c4631a2d3602342c1f4bd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/92204b24211e4455a7135a905530f2fb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/b2d01a57148c415199174f011d93775e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98] to archive 2024-12-09T17:22:37,367 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
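Column family C receives the same treatment in the entries below: every listed HFile is renamed into the archive tree. One way to check what ended up there afterwards, again only an illustrative sketch against a hypothetical archive root rather than anything the test itself runs, is a recursive listing with the FileSystem API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchiveSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical archive root; this log uses .../archive/data/default/TestAcidGuarantees/<region>/<cf>/.
    Path archiveRoot = new Path("/hbase/archive/data/default/TestAcidGuarantees");
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(archiveRoot, true);  // recurse into region/cf dirs
    long count = 0, bytes = 0;
    while (files.hasNext()) {
      LocatedFileStatus f = files.next();
      count++;
      bytes += f.getLen();
    }
    System.out.println(count + " archived store files, " + bytes + " bytes total");
  }
}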
2024-12-09T17:22:37,368 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/1a95daf5300f4e4c8576b8464c42e659 2024-12-09T17:22:37,369 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f7bcbc128780499c9f05a139e0c6dfa6 2024-12-09T17:22:37,370 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/ef421f688ce9438f837060d7f164b55e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/ef421f688ce9438f837060d7f164b55e 2024-12-09T17:22:37,371 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/32e55fbdf2fe46acb0306dc36b10c472 2024-12-09T17:22:37,372 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/53b9913ee08e4df19fa6fbe5b734c341 2024-12-09T17:22:37,372 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/97ca08739cd747e39d5634ecacf6453e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/97ca08739cd747e39d5634ecacf6453e 2024-12-09T17:22:37,373 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/8fc127fbbff34ebfa54fec4c11f44402 2024-12-09T17:22:37,374 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bda8b0d929c445ed9e6bcb0736f4814f 2024-12-09T17:22:37,375 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f36fe77004164a4e8ab2117c7a551b3b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f36fe77004164a4e8ab2117c7a551b3b 2024-12-09T17:22:37,376 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/bf355dbc89904528a2494a72cc50f1c0 2024-12-09T17:22:37,377 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/66d4b4a363db4c47bd00f10039b5d2d6 2024-12-09T17:22:37,377 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aee9f7e8ea67490c958df30b6d7139c8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aee9f7e8ea67490c958df30b6d7139c8 2024-12-09T17:22:37,378 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6c7280be1f944d91830487a8e7cb2111 2024-12-09T17:22:37,379 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/68df5d6307ab4aadb1f55dc3bdb284f5 2024-12-09T17:22:37,380 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/3f22a4af317d4898ba6fefb62c564925 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/3f22a4af317d4898ba6fefb62c564925 2024-12-09T17:22:37,381 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/aea0611db49e4a83b4c1e400bdea74a1 2024-12-09T17:22:37,382 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/64f997520fd242528ca5c05ead6596fc 2024-12-09T17:22:37,383 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/2b8d62d44ece4f6f975f7455b310b96f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/2b8d62d44ece4f6f975f7455b310b96f 2024-12-09T17:22:37,383 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/222bb496bc1c41688480de78ba967dd0 2024-12-09T17:22:37,384 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/816c6b13d2564f06aa3d7af52f55fc8e 2024-12-09T17:22:37,385 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/e035a3c0896c4631a2d3602342c1f4bd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/e035a3c0896c4631a2d3602342c1f4bd 2024-12-09T17:22:37,386 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5e20a73cc0c6427bbc0e360f31b58adf 2024-12-09T17:22:37,387 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/dcea4713a5d646ecb1ccc3a0f8f7b049 2024-12-09T17:22:37,388 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/92204b24211e4455a7135a905530f2fb to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/92204b24211e4455a7135a905530f2fb 2024-12-09T17:22:37,389 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/6805dee87ec54db984f6cd73ad94bf19 2024-12-09T17:22:37,389 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/229ce7392e464659a23c6c292987f22b 2024-12-09T17:22:37,390 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/b2d01a57148c415199174f011d93775e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/b2d01a57148c415199174f011d93775e 2024-12-09T17:22:37,391 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/d7efeed3b2ed4deb9d5bfefd4d3a05b2 2024-12-09T17:22:37,392 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9f852b87ff124cada5d3d0529ce47766 2024-12-09T17:22:37,393 DEBUG [StoreCloser-TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/f847af1f41034610b7cbafe3039bbd98 2024-12-09T17:22:37,397 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/recovered.edits/424.seqid, newMaxSeqId=424, maxSeqId=1 2024-12-09T17:22:37,397 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79. 
2024-12-09T17:22:37,398 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for ba40c75e49d5ed83d5ce6b14bf62ff79: 2024-12-09T17:22:37,399 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:37,399 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=ba40c75e49d5ed83d5ce6b14bf62ff79, regionState=CLOSED 2024-12-09T17:22:37,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-09T17:22:37,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure ba40c75e49d5ed83d5ce6b14bf62ff79, server=80c69eb3c456,42927,1733764865379 in 1.5040 sec 2024-12-09T17:22:37,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-09T17:22:37,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ba40c75e49d5ed83d5ce6b14bf62ff79, UNASSIGN in 1.5070 sec 2024-12-09T17:22:37,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-09T17:22:37,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5100 sec 2024-12-09T17:22:37,403 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764957403"}]},"ts":"1733764957403"} 2024-12-09T17:22:37,404 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:22:37,417 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:22:37,418 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5490 sec 2024-12-09T17:22:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-09T17:22:37,979 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-09T17:22:37,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:22:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:37,982 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=99, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:37,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-09T17:22:37,984 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=99, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:37,986 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:37,991 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/recovered.edits] 2024-12-09T17:22:37,996 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/76ccaa1b634f4573af2d6b27ea27bc08 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/76ccaa1b634f4573af2d6b27ea27bc08 2024-12-09T17:22:37,998 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c10b9768d9de4473a866404e92339a65 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/A/c10b9768d9de4473a866404e92339a65 2024-12-09T17:22:38,002 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/2ab0727134524677a06b574226848238 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/2ab0727134524677a06b574226848238 2024-12-09T17:22:38,004 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c73c6db8c4b3441db48201dec1ad9451 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/B/c73c6db8c4b3441db48201dec1ad9451 2024-12-09T17:22:38,007 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5a674968ace34c6d8cf05cc32c7ef154 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/5a674968ace34c6d8cf05cc32c7ef154 2024-12-09T17:22:38,009 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9613b4cb74524634afc4e27ff027e458 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/C/9613b4cb74524634afc4e27ff027e458 2024-12-09T17:22:38,013 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/recovered.edits/424.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79/recovered.edits/424.seqid 2024-12-09T17:22:38,013 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/ba40c75e49d5ed83d5ce6b14bf62ff79 2024-12-09T17:22:38,013 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:22:38,016 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=99, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:38,024 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:22:38,026 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:22:38,027 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=99, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:38,027 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-09T17:22:38,027 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733764958027"}]},"ts":"9223372036854775807"} 2024-12-09T17:22:38,029 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:22:38,029 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ba40c75e49d5ed83d5ce6b14bf62ff79, NAME => 'TestAcidGuarantees,,1733764926769.ba40c75e49d5ed83d5ce6b14bf62ff79.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:22:38,029 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-09T17:22:38,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733764958029"}]},"ts":"9223372036854775807"} 2024-12-09T17:22:38,031 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:22:38,076 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=99, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:38,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 96 msec 2024-12-09T17:22:38,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-09T17:22:38,085 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-12-09T17:22:38,095 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 242), OpenFileDescriptor=453 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=320 (was 379), ProcessCount=11 (was 11), AvailableMemoryMB=4224 (was 4311) 2024-12-09T17:22:38,102 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=320, ProcessCount=11, AvailableMemoryMB=4223 2024-12-09T17:22:38,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-09T17:22:38,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:22:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:38,106 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:22:38,106 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:38,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 100 2024-12-09T17:22:38,107 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:22:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:38,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742176_1352 (size=963) 2024-12-09T17:22:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:38,516 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:22:38,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742177_1353 (size=53) 2024-12-09T17:22:38,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:38,926 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:22:38,926 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5a7680059af346aa87c054fb00a90c2f, disabling compactions & flushes 2024-12-09T17:22:38,926 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:38,926 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:38,926 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. after waiting 0 ms 2024-12-09T17:22:38,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:38,927 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:38,927 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:38,929 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:22:38,930 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733764958929"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764958929"}]},"ts":"1733764958929"} 2024-12-09T17:22:38,932 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:22:38,933 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:22:38,934 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764958933"}]},"ts":"1733764958933"} 2024-12-09T17:22:38,935 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:22:38,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, ASSIGN}] 2024-12-09T17:22:38,984 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, ASSIGN 2024-12-09T17:22:38,985 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:22:39,136 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:39,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:39,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:39,293 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:39,298 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:39,298 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:22:39,298 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,299 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:22:39,299 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,299 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,301 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,304 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:39,304 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName A 2024-12-09T17:22:39,305 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:39,305 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:39,306 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,307 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:39,307 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName B 2024-12-09T17:22:39,307 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:39,308 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:39,308 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,310 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:39,310 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName C 2024-12-09T17:22:39,310 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:39,311 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:39,311 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:39,312 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,313 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,315 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:22:39,317 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:39,320 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:22:39,320 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 5a7680059af346aa87c054fb00a90c2f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67725232, jitterRate=0.009184598922729492}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:22:39,321 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:39,322 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., pid=102, masterSystemTime=1733764959293 2024-12-09T17:22:39,324 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:39,324 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:39,325 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:39,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-09T17:22:39,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 in 187 msec 2024-12-09T17:22:39,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-09T17:22:39,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, ASSIGN in 345 msec 2024-12-09T17:22:39,331 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:22:39,331 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764959331"}]},"ts":"1733764959331"} 2024-12-09T17:22:39,333 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:22:39,377 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=100, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:22:39,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2730 sec 2024-12-09T17:22:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-09T17:22:40,216 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-09T17:22:40,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dacfd49 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5271608e 2024-12-09T17:22:40,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f9fed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:40,273 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:40,275 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:40,277 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:22:40,279 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56900, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:22:40,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-09T17:22:40,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:22:40,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-09T17:22:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742178_1354 (size=999) 2024-12-09T17:22:40,695 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-09T17:22:40,695 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-09T17:22:40,700 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:22:40,703 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, REOPEN/MOVE}] 2024-12-09T17:22:40,703 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, REOPEN/MOVE 2024-12-09T17:22:40,704 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:40,704 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:22:40,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; CloseRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:40,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:40,857 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(124): Close 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:40,857 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:22:40,857 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1681): Closing 5a7680059af346aa87c054fb00a90c2f, disabling compactions & flushes 2024-12-09T17:22:40,857 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:40,857 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:40,857 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. after waiting 0 ms 2024-12-09T17:22:40,857 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:40,898 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T17:22:40,899 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:40,899 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1635): Region close journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:40,899 WARN [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionServer(3786): Not adding moved region record: 5a7680059af346aa87c054fb00a90c2f to self. 2024-12-09T17:22:40,902 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(170): Closed 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:40,903 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=CLOSED 2024-12-09T17:22:40,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-12-09T17:22:40,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; CloseRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 in 199 msec 2024-12-09T17:22:40,905 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, REOPEN/MOVE; state=CLOSED, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=true 2024-12-09T17:22:41,056 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=105, state=RUNNABLE; OpenRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:22:41,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,217 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:41,217 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7285): Opening region: {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:22:41,219 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,219 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:22:41,219 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7327): checking encryption for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,219 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7330): checking classloading for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,221 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,222 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:41,223 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName A 2024-12-09T17:22:41,224 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:41,225 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:41,226 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,227 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:41,227 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName B 2024-12-09T17:22:41,227 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:41,228 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:41,228 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,229 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:22:41,229 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a7680059af346aa87c054fb00a90c2f columnFamilyName C 2024-12-09T17:22:41,229 DEBUG [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:41,230 INFO [StoreOpener-5a7680059af346aa87c054fb00a90c2f-1 {}] regionserver.HStore(327): Store=5a7680059af346aa87c054fb00a90c2f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:22:41,230 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,231 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,232 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,233 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:22:41,233 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1085): writing seq id for 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,234 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1102): Opened 5a7680059af346aa87c054fb00a90c2f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68622847, jitterRate=0.022560104727745056}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:22:41,234 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1001): Region open journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:41,235 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., pid=107, masterSystemTime=1733764961210 2024-12-09T17:22:41,236 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,236 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
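The reopened region comes up with a CompactingMemStore per column family (A, B, C), each reporting compactor=ADAPTIVE, a 2.00 MB in-memory flush threshold, and the default store file tracker; next sequenceid=5 follows from the recovered.edits/4.seqid file written during the close above. In table-descriptor terms, that memstore type corresponds to the column family's in-memory compaction policy; the builder sketch below is a hypothetical illustration consistent with those entries (it is not taken from this test's configuration):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemStoreFamily {
  public static void main(String[] args) {
    // A column family whose memstore uses in-memory compaction with the ADAPTIVE
    // compactor, matching the "memstore type=CompactingMemStore ... compactor=ADAPTIVE"
    // entries above. Flush thresholds and pipeline limits are left at their defaults.
    ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
    System.out.println(cfA);
  }
}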
2024-12-09T17:22:41,236 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=OPEN, openSeqNum=5, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=105 2024-12-09T17:22:41,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=105, state=SUCCESS; OpenRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 in 180 msec 2024-12-09T17:22:41,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-09T17:22:41,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, REOPEN/MOVE in 536 msec 2024-12-09T17:22:41,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-09T17:22:41,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 539 msec 2024-12-09T17:22:41,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 959 msec 2024-12-09T17:22:41,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-09T17:22:41,243 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x033feebb to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a139b42 2024-12-09T17:22:41,270 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1157d18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,270 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c40db2e to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1acf826f 2024-12-09T17:22:41,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bcb3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,285 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a86cb71 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cbce2b4 2024-12-09T17:22:41,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77b5b03d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,297 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x3401188a to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4fd3f5fc 2024-12-09T17:22:41,304 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15bd9063, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,305 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x55650656 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c97513 2024-12-09T17:22:41,312 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c0ec341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,313 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42af2962 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4831febd 2024-12-09T17:22:41,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b660061, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5910b8c7 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e93614e 2024-12-09T17:22:41,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ad0ff5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,330 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x003f9a05 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@523025d 2024-12-09T17:22:41,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28dc77ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,338 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b6d860 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9a1701 2024-12-09T17:22:41,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70304ef6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,346 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x16722a1f to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d3b05cf 2024-12-09T17:22:41,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f8ea360, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:22:41,357 DEBUG [hconnection-0x164e1471-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:41,358 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-09T17:22:41,359 DEBUG [hconnection-0x1900e2d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:41,360 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:41,360 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,361 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:41,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:41,363 DEBUG [hconnection-0x3cadc092-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,364 DEBUG [hconnection-0x111403b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,364 DEBUG [hconnection-0xcd2da43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,364 DEBUG [hconnection-0x742f0a62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,364 DEBUG [hconnection-0x26cd340e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, 
sasl=false 2024-12-09T17:22:41,365 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,365 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,365 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,365 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42864, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,365 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,365 DEBUG [hconnection-0x35cab96f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,366 DEBUG [hconnection-0x4d1a23b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,366 DEBUG [hconnection-0x56860fa1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:22:41,366 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42878, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,366 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,367 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:22:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:41,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:41,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765021384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765021384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765021384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765021384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765021385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209987c77a135f447d6b8952bc3b9e0b831_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764961370/Put/seqid=0 2024-12-09T17:22:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742179_1355 (size=12154) 2024-12-09T17:22:41,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:41,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765021487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765021487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765021487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765021488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765021488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,512 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:41,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:41,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:41,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:41,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:41,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765021689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765021689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765021692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765021692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765021692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,805 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:41,809 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209987c77a135f447d6b8952bc3b9e0b831_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209987c77a135f447d6b8952bc3b9e0b831_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:41,809 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/e8b0efd21e3b4a11a476f845882ebfe0, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:41,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/e8b0efd21e3b4a11a476f845882ebfe0 is 175, key is test_row_0/A:col10/1733764961370/Put/seqid=0 2024-12-09T17:22:41,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742180_1356 (size=30955) 2024-12-09T17:22:41,818 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
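The cycle above repeats throughout this window: the master re-dispatches the flush procedure (pid=109), the region server's FlushRegionCallable finds the region already flushing (the "NOT flushing ... as already flushing" line from HRegion(2496)), doCall throws IOException("Unable to complete flush ..."), and RemoteProcedureResultReporter sends the failure back so HMaster logs "Remote procedure failed" and retries. Below is a minimal sketch of that server-side control flow, assuming simplified, hypothetical signatures rather than the actual HBase sources; only the class and method names are taken from the stack traces above.

import java.io.IOException;

// Simplified sketch of the flush retry loop seen above (pid=109).
// Class and method names follow the stack traces; the bodies and the
// HRegionLike stand-in are illustrative assumptions, not HBase code.
final class FlushRegionCallableSketch {

    interface HRegionLike {
        boolean flushcache();   // returns false when the region is already flushing
        String describe();      // e.g. the {ENCODED => ..., NAME => ...} string in the log
    }

    private final HRegionLike region;

    FlushRegionCallableSketch(HRegionLike region) {
        this.region = region;
    }

    void doCall() throws IOException {
        boolean flushed = region.flushcache();
        if (!flushed) {
            // FlushRegionCallable.doCall(61): surface the failure so the master
            // marks the remote procedure failed and schedules another attempt.
            throw new IOException("Unable to complete flush " + region.describe());
        }
    }
}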
2024-12-09T17:22:41,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:41,970 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:41,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:41,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:41,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:41,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
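While the flush is still in progress, writers keep hitting the memstore block limit (the 512.0 K figure here is presumably a deliberately small test setting), so each Mutate is rejected with RegionTooBusyException until the flush drains the memstore. That is a retriable condition for clients. The sketch below shows one way a caller could back off and retry such a put; the table, row, and column names mirror the test data in the log, but the retry policy and values are illustrative assumptions, not the HBase client's built-in behavior.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    return;  // write accepted
                } catch (IOException e) {
                    // e.g. RegionTooBusyException ("Over memstore limit=512.0 K" above),
                    // possibly after the client's own internal retries; wait and try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new IOException("region still too busy after retries");
        }
    }
}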
2024-12-09T17:22:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765021995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765021995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765021995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765021995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:41,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:41,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765021995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:42,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,214 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/e8b0efd21e3b4a11a476f845882ebfe0 2024-12-09T17:22:42,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b6a924e1b9eb4782921000f511243274 is 50, key is test_row_0/B:col10/1733764961370/Put/seqid=0 2024-12-09T17:22:42,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742181_1357 (size=12001) 2024-12-09T17:22:42,274 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
as already flushing 2024-12-09T17:22:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:42,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:42,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:42,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765022498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765022498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:42,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765022500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765022500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765022500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,579 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:42,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b6a924e1b9eb4782921000f511243274 2024-12-09T17:22:42,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/51ae0f2409574964b24a423e3e290ee6 is 50, key is test_row_0/C:col10/1733764961370/Put/seqid=0 2024-12-09T17:22:42,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742182_1358 (size=12001) 2024-12-09T17:22:42,731 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
as already flushing 2024-12-09T17:22:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:42,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:42,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:42,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:42,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:42,917 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:22:43,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:43,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:43,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:43,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:43,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:43,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:43,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/51ae0f2409574964b24a423e3e290ee6 2024-12-09T17:22:43,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/e8b0efd21e3b4a11a476f845882ebfe0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0 2024-12-09T17:22:43,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0, entries=150, sequenceid=17, filesize=30.2 K 2024-12-09T17:22:43,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b6a924e1b9eb4782921000f511243274 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274 2024-12-09T17:22:43,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274, entries=150, sequenceid=17, 
filesize=11.7 K 2024-12-09T17:22:43,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/51ae0f2409574964b24a423e3e290ee6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6 2024-12-09T17:22:43,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6, entries=150, sequenceid=17, filesize=11.7 K 2024-12-09T17:22:43,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5a7680059af346aa87c054fb00a90c2f in 1704ms, sequenceid=17, compaction requested=false 2024-12-09T17:22:43,075 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-09T17:22:43,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:43,189 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-09T17:22:43,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:43,189 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:43,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412096c26c4bcfd994a86ab5168379166ff23_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764961382/Put/seqid=0 2024-12-09T17:22:43,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742183_1359 (size=12154) 2024-12-09T17:22:43,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:43,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:43,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:43,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765023513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765023514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765023515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765023515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765023515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:43,602 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412096c26c4bcfd994a86ab5168379166ff23_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c26c4bcfd994a86ab5168379166ff23_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:43,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a3c3e33b08804e67976e53488e45b490, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:43,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a3c3e33b08804e67976e53488e45b490 is 175, key is test_row_0/A:col10/1733764961382/Put/seqid=0 2024-12-09T17:22:43,606 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742184_1360 (size=30955) 2024-12-09T17:22:43,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765023617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765023619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765023619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765023619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765023821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765023822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765023822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:43,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:43,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765023823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,006 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a3c3e33b08804e67976e53488e45b490 2024-12-09T17:22:44,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/a7f3de00433142338f86410438756450 is 50, key is test_row_0/B:col10/1733764961382/Put/seqid=0 2024-12-09T17:22:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742185_1361 (size=12001) 2024-12-09T17:22:44,018 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/a7f3de00433142338f86410438756450 2024-12-09T17:22:44,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/13355335a79b4b999b236dac286f74b8 is 50, key is test_row_0/C:col10/1733764961382/Put/seqid=0 2024-12-09T17:22:44,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742186_1362 (size=12001) 2024-12-09T17:22:44,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765024124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765024125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765024126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765024127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,431 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/13355335a79b4b999b236dac286f74b8 2024-12-09T17:22:44,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a3c3e33b08804e67976e53488e45b490 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490 2024-12-09T17:22:44,437 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490, entries=150, sequenceid=41, filesize=30.2 K 2024-12-09T17:22:44,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/a7f3de00433142338f86410438756450 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450 2024-12-09T17:22:44,440 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450, entries=150, sequenceid=41, filesize=11.7 K 2024-12-09T17:22:44,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/13355335a79b4b999b236dac286f74b8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8 2024-12-09T17:22:44,442 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8, entries=150, sequenceid=41, filesize=11.7 K 2024-12-09T17:22:44,443 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5a7680059af346aa87c054fb00a90c2f in 1254ms, sequenceid=41, compaction requested=false 2024-12-09T17:22:44,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:44,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:44,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-09T17:22:44,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-09T17:22:44,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-09T17:22:44,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0830 sec 2024-12-09T17:22:44,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 3.0880 sec 2024-12-09T17:22:44,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:44,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:44,633 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:44,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412091c0f7a76f04540b8a649735312bcda73_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:44,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742187_1363 (size=14594) 2024-12-09T17:22:44,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765024652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765024653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765024656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765024656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765024761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765024761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765024762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765024762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765024965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765024965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765024966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:44,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:44,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765024966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,043 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:45,045 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412091c0f7a76f04540b8a649735312bcda73_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412091c0f7a76f04540b8a649735312bcda73_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:45,046 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/be363441ace6414a8dcf43e95c7d6400, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:45,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/be363441ace6414a8dcf43e95c7d6400 is 175, key is test_row_0/A:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742188_1364 (size=39549) 2024-12-09T17:22:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765025268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765025268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765025270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765025270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,450 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/be363441ace6414a8dcf43e95c7d6400 2024-12-09T17:22:45,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4a05a2031eb54ed9a36df2e705fe7d44 is 50, key is test_row_0/B:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:45,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742189_1365 (size=12001) 2024-12-09T17:22:45,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-09T17:22:45,464 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-09T17:22:45,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:45,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-09T17:22:45,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:45,466 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:45,466 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:45,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-09T17:22:45,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765025535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,537 DEBUG [Thread-1597 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:45,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:45,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:45,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:45,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:45,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:45,769 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:45,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:45,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765025772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765025772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765025773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:45,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765025777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4a05a2031eb54ed9a36df2e705fe7d44 2024-12-09T17:22:45,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/bc33f64bdf6b4ae3975a80af872abad5 is 50, key is test_row_0/C:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:45,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742190_1366 (size=12001) 2024-12-09T17:22:45,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:45,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:45,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:45,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:45,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:45,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:46,074 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:46,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/bc33f64bdf6b4ae3975a80af872abad5 2024-12-09T17:22:46,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/be363441ace6414a8dcf43e95c7d6400 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400 2024-12-09T17:22:46,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400, entries=200, sequenceid=54, filesize=38.6 K 2024-12-09T17:22:46,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4a05a2031eb54ed9a36df2e705fe7d44 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44 2024-12-09T17:22:46,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44, entries=150, sequenceid=54, 
filesize=11.7 K 2024-12-09T17:22:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/bc33f64bdf6b4ae3975a80af872abad5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5 2024-12-09T17:22:46,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5, entries=150, sequenceid=54, filesize=11.7 K 2024-12-09T17:22:46,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5a7680059af346aa87c054fb00a90c2f in 1675ms, sequenceid=54, compaction requested=true 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:46,307 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:46,307 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:46,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 
5a7680059af346aa87c054fb00a90c2f/A is initiating minor compaction (all files) 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/B is initiating minor compaction (all files) 2024-12-09T17:22:46,308 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/A in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,308 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/B in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,308 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=99.1 K 2024-12-09T17:22:46,308 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.2 K 2024-12-09T17:22:46,308 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400] 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b6a924e1b9eb4782921000f511243274, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733764961370 2024-12-09T17:22:46,308 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8b0efd21e3b4a11a476f845882ebfe0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733764961370 2024-12-09T17:22:46,309 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a7f3de00433142338f86410438756450, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733764961382 2024-12-09T17:22:46,309 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3c3e33b08804e67976e53488e45b490, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733764961382 2024-12-09T17:22:46,309 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a05a2031eb54ed9a36df2e705fe7d44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:46,309 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting be363441ace6414a8dcf43e95c7d6400, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:46,314 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:46,315 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:46,315 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/56700c817ab845339f29fef2b175301b is 50, key is test_row_0/B:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:46,315 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209890f6e9607f140eab503eb5d00255ee4_5a7680059af346aa87c054fb00a90c2f store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:46,317 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209890f6e9607f140eab503eb5d00255ee4_5a7680059af346aa87c054fb00a90c2f, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:46,317 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209890f6e9607f140eab503eb5d00255ee4_5a7680059af346aa87c054fb00a90c2f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742191_1367 (size=12104) 2024-12-09T17:22:46,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742192_1368 (size=4469) 2024-12-09T17:22:46,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:46,379 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:46,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b8f5311e93f246a4827ec3bdfa76d5e4_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764964655/Put/seqid=0 2024-12-09T17:22:46,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742193_1369 (size=12154) 2024-12-09T17:22:46,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:46,723 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/56700c817ab845339f29fef2b175301b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/56700c817ab845339f29fef2b175301b 2024-12-09T17:22:46,725 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#A#compaction#313 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:46,726 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/7f7a0093db8b4e6494fa81fe8cb31bf7 is 175, key is test_row_0/A:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:46,726 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/B of 5a7680059af346aa87c054fb00a90c2f into 56700c817ab845339f29fef2b175301b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:46,727 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:46,727 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/B, priority=13, startTime=1733764966307; duration=0sec 2024-12-09T17:22:46,727 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:46,727 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:B 2024-12-09T17:22:46,727 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:46,728 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:46,728 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/C is initiating minor compaction (all files) 2024-12-09T17:22:46,728 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/C in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:46,728 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.2 K 2024-12-09T17:22:46,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742194_1370 (size=31058) 2024-12-09T17:22:46,728 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 51ae0f2409574964b24a423e3e290ee6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733764961370 2024-12-09T17:22:46,729 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 13355335a79b4b999b236dac286f74b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733764961382 2024-12-09T17:22:46,729 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting bc33f64bdf6b4ae3975a80af872abad5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:46,732 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/7f7a0093db8b4e6494fa81fe8cb31bf7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7 2024-12-09T17:22:46,734 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#C#compaction#315 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:46,735 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/2bd3a5224a504185aeab8efca2e8b7a9 is 50, key is test_row_0/C:col10/1733764964631/Put/seqid=0 2024-12-09T17:22:46,737 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/A of 5a7680059af346aa87c054fb00a90c2f into 7f7a0093db8b4e6494fa81fe8cb31bf7(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:46,737 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:46,737 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/A, priority=13, startTime=1733764966307; duration=0sec 2024-12-09T17:22:46,737 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:46,737 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:A 2024-12-09T17:22:46,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742195_1371 (size=12104) 2024-12-09T17:22:46,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:46,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:46,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:46,791 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b8f5311e93f246a4827ec3bdfa76d5e4_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b8f5311e93f246a4827ec3bdfa76d5e4_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a1adaa7649184e69b82847f6ff84e50e, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:46,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a1adaa7649184e69b82847f6ff84e50e is 175, key is test_row_0/A:col10/1733764964655/Put/seqid=0 2024-12-09T17:22:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742196_1372 (size=30955) 2024-12-09T17:22:46,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765026818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765026819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765026819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765026820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765026921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765026924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765026925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:46,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:46,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765026925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765027125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765027130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765027131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765027131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,141 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/2bd3a5224a504185aeab8efca2e8b7a9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/2bd3a5224a504185aeab8efca2e8b7a9 2024-12-09T17:22:47,144 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/C of 5a7680059af346aa87c054fb00a90c2f into 2bd3a5224a504185aeab8efca2e8b7a9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:47,144 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:47,144 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/C, priority=13, startTime=1733764966307; duration=0sec 2024-12-09T17:22:47,145 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:47,145 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:C 2024-12-09T17:22:47,195 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a1adaa7649184e69b82847f6ff84e50e 2024-12-09T17:22:47,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/c9e824c3f90e48758852ad8ff594b2e2 is 50, key is test_row_0/B:col10/1733764964655/Put/seqid=0 2024-12-09T17:22:47,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742197_1373 (size=12001) 2024-12-09T17:22:47,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765027430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765027434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765027435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765027436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:47,605 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/c9e824c3f90e48758852ad8ff594b2e2 2024-12-09T17:22:47,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/5af132d1e3ff4dcf96aec2848c2c1790 is 50, key is test_row_0/C:col10/1733764964655/Put/seqid=0 2024-12-09T17:22:47,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742198_1374 (size=12001) 2024-12-09T17:22:47,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765027937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765027938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765027939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:47,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:47,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765027942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:48,016 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/5af132d1e3ff4dcf96aec2848c2c1790 2024-12-09T17:22:48,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a1adaa7649184e69b82847f6ff84e50e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e 2024-12-09T17:22:48,022 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e, entries=150, sequenceid=77, filesize=30.2 K 2024-12-09T17:22:48,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/c9e824c3f90e48758852ad8ff594b2e2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2 2024-12-09T17:22:48,025 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2, entries=150, sequenceid=77, filesize=11.7 K 2024-12-09T17:22:48,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/5af132d1e3ff4dcf96aec2848c2c1790 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790 2024-12-09T17:22:48,028 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790, entries=150, sequenceid=77, filesize=11.7 K 2024-12-09T17:22:48,029 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a7680059af346aa87c054fb00a90c2f in 1649ms, sequenceid=77, compaction requested=false 2024-12-09T17:22:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-09T17:22:48,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-09T17:22:48,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-09T17:22:48,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5640 sec 2024-12-09T17:22:48,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.5670 sec 2024-12-09T17:22:48,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:48,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:48,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:48,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120966d08858743a49428da15bb6250e04f5_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:48,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742199_1375 (size=17034) 2024-12-09T17:22:48,969 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:48,972 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120966d08858743a49428da15bb6250e04f5_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966d08858743a49428da15bb6250e04f5_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:48,973 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/cc21a649e9464829a3500bcb38c90d7b, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:48,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/cc21a649e9464829a3500bcb38c90d7b is 175, key is test_row_0/A:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:48,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:48,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765028974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:48,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:48,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765028975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:48,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:48,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765028977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:48,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:48,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765028980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:48,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742200_1376 (size=48139) 2024-12-09T17:22:49,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765029081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765029081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765029083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765029086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765029283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765029284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765029287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765029290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,393 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/cc21a649e9464829a3500bcb38c90d7b 2024-12-09T17:22:49,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/f511895c85924ad092a630033472354b is 50, key is test_row_0/B:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:49,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742201_1377 (size=12001) 2024-12-09T17:22:49,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765029556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,558 DEBUG [Thread-1597 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:22:49,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-09T17:22:49,569 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-09T17:22:49,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-09T17:22:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:49,571 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:49,572 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:49,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:49,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765029587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765029587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765029592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765029595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:49,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-09T17:22:49,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:49,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:49,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:49,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:49,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:49,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:49,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/f511895c85924ad092a630033472354b 2024-12-09T17:22:49,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7c111ff48da64c489f38d7edbdf6e281 is 50, key is test_row_0/C:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:49,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742202_1378 (size=12001) 2024-12-09T17:22:49,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:49,875 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:49,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-09T17:22:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
as already flushing 2024-12-09T17:22:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:49,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:49,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:49,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-09T17:22:50,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:50,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:50,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765030094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765030094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:50,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765030096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:50,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765030100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:50,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-09T17:22:50,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:50,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:50,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:50,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7c111ff48da64c489f38d7edbdf6e281 2024-12-09T17:22:50,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/cc21a649e9464829a3500bcb38c90d7b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b 2024-12-09T17:22:50,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b, entries=250, sequenceid=95, filesize=47.0 K 2024-12-09T17:22:50,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/f511895c85924ad092a630033472354b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b 2024-12-09T17:22:50,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b, entries=150, sequenceid=95, filesize=11.7 K 2024-12-09T17:22:50,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7c111ff48da64c489f38d7edbdf6e281 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281 2024-12-09T17:22:50,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281, entries=150, sequenceid=95, filesize=11.7 K 2024-12-09T17:22:50,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5a7680059af346aa87c054fb00a90c2f in 1285ms, sequenceid=95, compaction requested=true 2024-12-09T17:22:50,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:50,232 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:50,232 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:50,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:50,233 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110152 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:50,233 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:50,233 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/A is initiating minor compaction (all files) 2024-12-09T17:22:50,233 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/B is initiating minor compaction (all files) 2024-12-09T17:22:50,233 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/B in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:50,233 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/56700c817ab845339f29fef2b175301b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.3 K 2024-12-09T17:22:50,233 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/A in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,233 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=107.6 K 2024-12-09T17:22:50,233 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b] 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 56700c817ab845339f29fef2b175301b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f7a0093db8b4e6494fa81fe8cb31bf7, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c9e824c3f90e48758852ad8ff594b2e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733764964650 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1adaa7649184e69b82847f6ff84e50e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733764964650 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f511895c85924ad092a630033472354b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966819 2024-12-09T17:22:50,234 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc21a649e9464829a3500bcb38c90d7b, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966813 2024-12-09T17:22:50,239 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:50,239 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#B#compaction#321 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:50,240 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/d545279225004b66a9df355f05a4ef8c is 50, key is test_row_0/B:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:50,241 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209cb9ab5c451974f03b0a7ea84f7a8484f_5a7680059af346aa87c054fb00a90c2f store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:50,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742203_1379 (size=12207) 2024-12-09T17:22:50,243 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209cb9ab5c451974f03b0a7ea84f7a8484f_5a7680059af346aa87c054fb00a90c2f, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:50,243 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cb9ab5c451974f03b0a7ea84f7a8484f_5a7680059af346aa87c054fb00a90c2f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:50,249 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/d545279225004b66a9df355f05a4ef8c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/d545279225004b66a9df355f05a4ef8c 2024-12-09T17:22:50,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742204_1380 (size=4469) 2024-12-09T17:22:50,251 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#A#compaction#322 average throughput is 2.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:50,252 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/21f1e1fe87fc4a5f95ffdfaea3e07aec is 175, key is test_row_0/A:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:50,254 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/B of 5a7680059af346aa87c054fb00a90c2f into d545279225004b66a9df355f05a4ef8c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:50,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:50,254 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/B, priority=13, startTime=1733764970232; duration=0sec 2024-12-09T17:22:50,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:50,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:B 2024-12-09T17:22:50,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:50,255 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:50,255 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/C is initiating minor compaction (all files) 2024-12-09T17:22:50,255 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/C in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:50,255 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/2bd3a5224a504185aeab8efca2e8b7a9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.3 K 2024-12-09T17:22:50,255 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd3a5224a504185aeab8efca2e8b7a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733764963510 2024-12-09T17:22:50,256 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5af132d1e3ff4dcf96aec2848c2c1790, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733764964650 2024-12-09T17:22:50,256 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c111ff48da64c489f38d7edbdf6e281, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966819 2024-12-09T17:22:50,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is 
added to blk_1073742205_1381 (size=31161) 2024-12-09T17:22:50,261 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#C#compaction#323 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:50,262 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/e3b9cd93b9ec405481db9d8d82fc1234 is 50, key is test_row_0/C:col10/1733764966819/Put/seqid=0 2024-12-09T17:22:50,268 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/21f1e1fe87fc4a5f95ffdfaea3e07aec as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec 2024-12-09T17:22:50,272 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/A of 5a7680059af346aa87c054fb00a90c2f into 21f1e1fe87fc4a5f95ffdfaea3e07aec(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:50,272 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:50,272 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/A, priority=13, startTime=1733764970232; duration=0sec 2024-12-09T17:22:50,272 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:50,272 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:A 2024-12-09T17:22:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742206_1382 (size=12207) 2024-12-09T17:22:50,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:50,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-09T17:22:50,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:50,333 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:50,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c5b41d2c73764776b3eedc44d62dd5dd_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764968979/Put/seqid=0 2024-12-09T17:22:50,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742207_1383 (size=12154) 2024-12-09T17:22:50,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:50,681 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/e3b9cd93b9ec405481db9d8d82fc1234 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/e3b9cd93b9ec405481db9d8d82fc1234 2024-12-09T17:22:50,684 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/C of 5a7680059af346aa87c054fb00a90c2f into e3b9cd93b9ec405481db9d8d82fc1234(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:50,684 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:50,684 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/C, priority=13, startTime=1733764970232; duration=0sec 2024-12-09T17:22:50,684 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:50,684 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:C 2024-12-09T17:22:50,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:50,745 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c5b41d2c73764776b3eedc44d62dd5dd_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c5b41d2c73764776b3eedc44d62dd5dd_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:50,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/527fbf220e82427e895fa4eb7c95f0ff, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:50,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/527fbf220e82427e895fa4eb7c95f0ff is 175, key is test_row_0/A:col10/1733764968979/Put/seqid=0 2024-12-09T17:22:50,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742208_1384 (size=30955) 2024-12-09T17:22:51,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:51,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:51,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765031116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765031116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765031117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765031122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,152 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/527fbf220e82427e895fa4eb7c95f0ff 2024-12-09T17:22:51,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b9411dfd0f8341b781a66c989c1e21a1 is 50, key is test_row_0/B:col10/1733764968979/Put/seqid=0 2024-12-09T17:22:51,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742209_1385 (size=12001) 2024-12-09T17:22:51,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765031223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765031223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765031223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765031226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765031426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765031426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765031427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765031429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,562 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b9411dfd0f8341b781a66c989c1e21a1 2024-12-09T17:22:51,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/9d24da3589424c0b8830a91632556a7a is 50, key is test_row_0/C:col10/1733764968979/Put/seqid=0 2024-12-09T17:22:51,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742210_1386 (size=12001) 2024-12-09T17:22:51,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:51,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765031729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765031730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765031730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:51,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765031734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,014 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/9d24da3589424c0b8830a91632556a7a 2024-12-09T17:22:52,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/527fbf220e82427e895fa4eb7c95f0ff as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff 2024-12-09T17:22:52,020 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff, entries=150, sequenceid=118, filesize=30.2 K 2024-12-09T17:22:52,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b9411dfd0f8341b781a66c989c1e21a1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1 2024-12-09T17:22:52,024 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1, entries=150, sequenceid=118, filesize=11.7 K 2024-12-09T17:22:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/9d24da3589424c0b8830a91632556a7a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a 2024-12-09T17:22:52,032 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a, entries=150, sequenceid=118, filesize=11.7 K 2024-12-09T17:22:52,033 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 5a7680059af346aa87c054fb00a90c2f in 1700ms, sequenceid=118, compaction requested=false 2024-12-09T17:22:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:52,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-09T17:22:52,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-09T17:22:52,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-09T17:22:52,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4620 sec 2024-12-09T17:22:52,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.4650 sec 2024-12-09T17:22:52,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:52,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:52,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:52,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209dfc8dbd36e1443c7959346c120af85e6_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:52,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742211_1387 (size=14744) 2024-12-09T17:22:52,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765032258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765032260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765032259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765032264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765032365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765032365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765032365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765032369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765032568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765032568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765032568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765032574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,644 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:52,647 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209dfc8dbd36e1443c7959346c120af85e6_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209dfc8dbd36e1443c7959346c120af85e6_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:52,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c493e974a3b64fd4a980f8a439462312, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:52,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c493e974a3b64fd4a980f8a439462312 is 175, key is test_row_0/A:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:52,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742212_1388 (size=39699) 2024-12-09T17:22:52,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765032873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765032873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765032874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:52,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:52,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765032877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,053 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c493e974a3b64fd4a980f8a439462312 2024-12-09T17:22:53,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/9c7bc3af9961459d8f20dbfdd9eb8758 is 50, key is test_row_0/B:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742213_1389 (size=12151) 2024-12-09T17:22:53,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765033378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765033380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765033380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765033383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/9c7bc3af9961459d8f20dbfdd9eb8758 2024-12-09T17:22:53,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/b02ecaab47e243dcadc0109325b33181 is 50, key is test_row_0/C:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:53,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742214_1390 (size=12151) 2024-12-09T17:22:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-09T17:22:53,675 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-09T17:22:53,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-09T17:22:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:53,677 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:53,678 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:53,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
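Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects a write once the region's memstore grows past its blocking size, which is derived from two standard settings, hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit in these messages is presumably a deliberately small test-harness override (stock defaults are 128 MB and a multiplier of 4). A minimal illustrative sketch of how that limit is configured and computed; the class name and values below are hypothetical, not taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a flush is requested (stock default: 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the region memstore
    // exceeds flush.size * block.multiplier (stock default multiplier: 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
  }
}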
2024-12-09T17:22:53,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:53,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-09T17:22:53,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:53,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:53,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:53,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:53,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:53,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
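The "Unable to complete flush ... as already flushing" failure above is transient: as the later pid=115 entries show, the procedure dispatcher simply re-sends the FlushRegionCallable until the in-progress flush finishes. Client writes rejected with RegionTooBusyException are likewise retryable; the HBase client normally retries them internally, but an explicit loop makes the behaviour clearer. A minimal, hypothetical sketch against the public client API (table, row, column, backoff values, and the class name are illustrative):

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the region memstore is above
          // its blocking limit (depending on client retry settings the exception may
          // also surface wrapped by the client's own retry machinery).
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;           // give up eventually
          Thread.sleep(backoffMs);              // wait for the flush to catch up
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}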
2024-12-09T17:22:53,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/b02ecaab47e243dcadc0109325b33181 2024-12-09T17:22:53,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c493e974a3b64fd4a980f8a439462312 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312 2024-12-09T17:22:53,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312, entries=200, sequenceid=135, filesize=38.8 K 2024-12-09T17:22:53,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/9c7bc3af9961459d8f20dbfdd9eb8758 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758 2024-12-09T17:22:53,881 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758, entries=150, sequenceid=135, filesize=11.9 K 2024-12-09T17:22:53,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/b02ecaab47e243dcadc0109325b33181 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181 2024-12-09T17:22:53,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181, entries=150, sequenceid=135, filesize=11.9 K 2024-12-09T17:22:53,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5a7680059af346aa87c054fb00a90c2f in 1651ms, sequenceid=135, compaction requested=true 2024-12-09T17:22:53,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:53,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:53,886 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 
0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:53,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:53,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:53,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:53,887 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:53,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:53,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:53,887 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:53,887 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/B is initiating minor compaction (all files) 2024-12-09T17:22:53,887 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/B in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:53,887 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/d545279225004b66a9df355f05a4ef8c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.5 K 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/A is initiating minor compaction (all files) 2024-12-09T17:22:53,888 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/A in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
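The compaction-selection entries above show ExploringCompactionPolicy admitting all 3 store files of each store (101815 bytes for A, 36359 bytes for B) into a minor compaction. The core admission rule is a size-ratio test: a file may join a selection only if it is no larger than hbase.hstore.compaction.ratio times the combined size of the other candidates. A simplified, illustrative re-implementation of just that test (not the actual HBase code; byte sizes are reconstructed approximately from the 30.4 K / 30.2 K / 38.8 K figures above):

// Simplified sketch of the size-ratio test used when selecting files for a minor
// compaction; the real ExploringCompactionPolicy also enumerates permutations and
// enforces min/max file counts.
public class RatioCheckSketch {
  static boolean fitsRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) total += s;
    for (long s : sizes) {
      // Each file must be no larger than ratio * (sum of the other candidates).
      if (s > ratio * (total - s)) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    long[] aStoreFiles = {31130, 30986, 39699};      // approximate A-store file sizes
    System.out.println(fitsRatio(aStoreFiles, 1.2)); // 1.2 is the stock ratio -> true here
  }
}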
2024-12-09T17:22:53,888 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=99.4 K 2024-12-09T17:22:53,888 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312] 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d545279225004b66a9df355f05a4ef8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966819 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21f1e1fe87fc4a5f95ffdfaea3e07aec, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966819 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 527fbf220e82427e895fa4eb7c95f0ff, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733764968969 2024-12-09T17:22:53,888 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b9411dfd0f8341b781a66c989c1e21a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733764968969 2024-12-09T17:22:53,889 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c493e974a3b64fd4a980f8a439462312, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971102 2024-12-09T17:22:53,889 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c7bc3af9961459d8f20dbfdd9eb8758, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971116 2024-12-09T17:22:53,896 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:53,897 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#B#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:53,898 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/138478ad04074c199bfb0858091e0e14 is 50, key is test_row_0/B:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:53,898 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120996762d4629f4468f97b9ad44a403c7b6_5a7680059af346aa87c054fb00a90c2f store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:53,900 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120996762d4629f4468f97b9ad44a403c7b6_5a7680059af346aa87c054fb00a90c2f, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:53,900 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120996762d4629f4468f97b9ad44a403c7b6_5a7680059af346aa87c054fb00a90c2f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:53,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742215_1391 (size=12459) 2024-12-09T17:22:53,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742216_1392 (size=4469) 2024-12-09T17:22:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:53,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:53,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-09T17:22:53,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
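The mobdir/.tmp paths and the DefaultMobStoreFlusher / DefaultMobStoreCompactor entries appear because column family A of this table is MOB-enabled: cells above the MOB threshold are written to separate MOB files and only references stay in the ordinary HFiles, which is why the compactor above aborts its MOB writer when no MOB cells are found. A hedged sketch of how such a family is declared with the public API (the threshold, class name, and table layout are illustrative, not the test's actual setup):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Cells in family A larger than the threshold go to MOB files under /mobdir;
      // smaller cells stay in ordinary HFiles.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L * 1024)    // illustrative 100 KB threshold
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}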
2024-12-09T17:22:53,982 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:22:53,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:53,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:53,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:53,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:53,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:53,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:53,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b0575e258ecb43cf8f8e547d01bd83c3_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764972259/Put/seqid=0 2024-12-09T17:22:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742217_1393 (size=12304) 2024-12-09T17:22:54,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:54,304 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#A#compaction#331 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:54,304 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/4f72e0a7c4714825918dcafd0157e736 is 175, key is test_row_0/A:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:54,306 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/138478ad04074c199bfb0858091e0e14 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/138478ad04074c199bfb0858091e0e14 2024-12-09T17:22:54,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742218_1394 (size=31413) 2024-12-09T17:22:54,312 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/B of 5a7680059af346aa87c054fb00a90c2f into 138478ad04074c199bfb0858091e0e14(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:54,312 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:54,312 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/B, priority=13, startTime=1733764973886; duration=0sec 2024-12-09T17:22:54,312 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:54,312 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:B 2024-12-09T17:22:54,313 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:22:54,316 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:22:54,316 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/C is initiating minor compaction (all files) 2024-12-09T17:22:54,316 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/C in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
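Once the B compaction above commits, the long-compaction thread immediately picks up the queued request for store C. From outside the region server, the same activity can be requested and observed through the Admin API; a minimal, hypothetical polling sketch (connection settings, class name, and sleep interval are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.compact(table);   // request a compaction, much as the flusher did above
      // Poll until the region servers report no compaction running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);
      }
    }
  }
}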
2024-12-09T17:22:54,316 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/e3b9cd93b9ec405481db9d8d82fc1234, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=35.5 K 2024-12-09T17:22:54,316 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e3b9cd93b9ec405481db9d8d82fc1234, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733764966819 2024-12-09T17:22:54,317 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d24da3589424c0b8830a91632556a7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733764968969 2024-12-09T17:22:54,317 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b02ecaab47e243dcadc0109325b33181, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971116 2024-12-09T17:22:54,317 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/4f72e0a7c4714825918dcafd0157e736 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736 2024-12-09T17:22:54,321 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/A of 5a7680059af346aa87c054fb00a90c2f into 4f72e0a7c4714825918dcafd0157e736(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
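Both the flush earlier and the compactions here commit their output with the same pattern visible in the HRegionFileSystem(442) lines: the new HFile is written fully under the region's .tmp directory and only then renamed into the store directory, so readers never observe a partially written file. A hedged sketch of that write-then-rename idiom using the plain Hadoop FileSystem API (paths, contents, and class name are placeholders, not the actual HBase implementation):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/TestAcidGuarantees/exampleregion/.tmp/A/examplefile");
    Path dst = new Path("/data/default/TestAcidGuarantees/exampleregion/A/examplefile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("file contents are written fully under .tmp first");
    }
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {   // single rename is the commit step
      throw new IOException("commit failed for " + dst);
    }
  }
}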
2024-12-09T17:22:54,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:54,321 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/A, priority=13, startTime=1733764973886; duration=0sec 2024-12-09T17:22:54,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:54,321 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:A 2024-12-09T17:22:54,323 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#C#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:54,323 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/a31284b490d7406580a2f539b4607485 is 50, key is test_row_0/C:col10/1733764971116/Put/seqid=0 2024-12-09T17:22:54,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742219_1395 (size=12459) 2024-12-09T17:22:54,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:54,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:54,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
as already flushing 2024-12-09T17:22:54,395 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b0575e258ecb43cf8f8e547d01bd83c3_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b0575e258ecb43cf8f8e547d01bd83c3_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a91bacf666ed4bfc9ddc9ac435010f58, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a91bacf666ed4bfc9ddc9ac435010f58 is 175, key is test_row_0/A:col10/1733764972259/Put/seqid=0 2024-12-09T17:22:54,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742220_1396 (size=31105) 2024-12-09T17:22:54,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765034406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765034407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765034408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765034409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765034512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765034514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765034514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765034514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765034716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765034719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765034719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:54,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765034720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:54,730 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/a31284b490d7406580a2f539b4607485 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/a31284b490d7406580a2f539b4607485 2024-12-09T17:22:54,733 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/C of 5a7680059af346aa87c054fb00a90c2f into a31284b490d7406580a2f539b4607485(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:54,733 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:54,733 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/C, priority=13, startTime=1733764973886; duration=0sec 2024-12-09T17:22:54,734 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:54,734 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:C 2024-12-09T17:22:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:54,799 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a91bacf666ed4bfc9ddc9ac435010f58 2024-12-09T17:22:54,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4ea1591811b2410f9209c401947b7c98 is 50, key is test_row_0/B:col10/1733764972259/Put/seqid=0 2024-12-09T17:22:54,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742221_1397 (size=12151) 2024-12-09T17:22:55,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765035020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765035024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765035024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765035024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,208 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4ea1591811b2410f9209c401947b7c98 2024-12-09T17:22:55,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d9c0ff474f1945a689a9435b51635db0 is 50, key is test_row_0/C:col10/1733764972259/Put/seqid=0 2024-12-09T17:22:55,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742222_1398 (size=12151) 2024-12-09T17:22:55,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765035526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765035527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765035528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:55,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765035531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,617 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d9c0ff474f1945a689a9435b51635db0 2024-12-09T17:22:55,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/a91bacf666ed4bfc9ddc9ac435010f58 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58 2024-12-09T17:22:55,623 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58, entries=150, sequenceid=155, filesize=30.4 K 2024-12-09T17:22:55,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/4ea1591811b2410f9209c401947b7c98 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98 2024-12-09T17:22:55,626 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98, entries=150, sequenceid=155, filesize=11.9 K 2024-12-09T17:22:55,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d9c0ff474f1945a689a9435b51635db0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0 2024-12-09T17:22:55,630 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0, entries=150, sequenceid=155, filesize=11.9 K 2024-12-09T17:22:55,631 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 5a7680059af346aa87c054fb00a90c2f in 1649ms, sequenceid=155, compaction requested=false 2024-12-09T17:22:55,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:55,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:55,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-09T17:22:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-09T17:22:55,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-09T17:22:55,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9540 sec 2024-12-09T17:22:55,633 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.9570 sec 2024-12-09T17:22:55,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-09T17:22:55,781 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-09T17:22:55,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:55,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-09T17:22:55,783 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-09T17:22:55,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:55,783 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:55,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:55,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:55,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:55,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:55,935 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:55,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:55,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f519859bd2404636b7cd480bb79a9d80_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764974408/Put/seqid=0 2024-12-09T17:22:55,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38771 is added to blk_1073742223_1399 (size=12304) 2024-12-09T17:22:56,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:56,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:56,346 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f519859bd2404636b7cd480bb79a9d80_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f519859bd2404636b7cd480bb79a9d80_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:56,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/6d1703b260e84717820eedd592db5537, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:56,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/6d1703b260e84717820eedd592db5537 is 175, key is test_row_0/A:col10/1733764974408/Put/seqid=0 2024-12-09T17:22:56,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742224_1400 (size=31105) 2024-12-09T17:22:56,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:56,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:56,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:56,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765036555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765036556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765036560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765036560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765036662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765036662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765036663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765036665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,751 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/6d1703b260e84717820eedd592db5537 2024-12-09T17:22:56,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/3d886cb436fe4725822756478406351e is 50, key is test_row_0/B:col10/1733764974408/Put/seqid=0 2024-12-09T17:22:56,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742225_1401 (size=12151) 2024-12-09T17:22:56,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765036866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765036866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765036867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:56,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765036868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:56,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:57,159 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/3d886cb436fe4725822756478406351e 2024-12-09T17:22:57,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/227ac1824c164a6489065788ec4659b4 is 50, key is test_row_0/C:col10/1733764974408/Put/seqid=0 2024-12-09T17:22:57,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742226_1402 (size=12151) 2024-12-09T17:22:57,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765037171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765037172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765037174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:22:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765037177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:22:57,568 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/227ac1824c164a6489065788ec4659b4
2024-12-09T17:22:57,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/6d1703b260e84717820eedd592db5537 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537
2024-12-09T17:22:57,574 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537, entries=150, sequenceid=174, filesize=30.4 K
2024-12-09T17:22:57,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/3d886cb436fe4725822756478406351e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e
2024-12-09T17:22:57,577 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e, entries=150, sequenceid=174, filesize=11.9 K
2024-12-09T17:22:57,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/227ac1824c164a6489065788ec4659b4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4
2024-12-09T17:22:57,580 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4, entries=150, sequenceid=174, filesize=11.9 K
2024-12-09T17:22:57,581 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 5a7680059af346aa87c054fb00a90c2f in 1646ms, sequenceid=174, compaction requested=true
2024-12-09T17:22:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f:
2024-12-09T17:22:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.
2024-12-09T17:22:57,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117
2024-12-09T17:22:57,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=117
2024-12-09T17:22:57,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116
2024-12-09T17:22:57,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7990 sec
2024-12-09T17:22:57,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8010 sec
2024-12-09T17:22:57,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f
2024-12-09T17:22:57,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB
2024-12-09T17:22:57,676 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A
2024-12-09T17:22:57,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:22:57,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B
2024-12-09T17:22:57,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-09T17:22:57,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:57,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:57,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b9a8ddac7650465890c2e47e11556021_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:57,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742227_1403 (size=14794) 2024-12-09T17:22:57,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765037691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765037692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765037692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765037693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765037796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765037798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765037798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:57,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765037798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-09T17:22:57,886 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-09T17:22:57,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:22:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-09T17:22:57,889 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:22:57,889 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:22:57,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:22:57,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:22:57,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:22:58,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765037999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765038001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765038002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765038002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,041 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:58,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,085 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:58,088 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b9a8ddac7650465890c2e47e11556021_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b9a8ddac7650465890c2e47e11556021_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:58,089 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/9e58eb97728445cc9e8ded1092166fee, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:58,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/9e58eb97728445cc9e8ded1092166fee is 175, key is test_row_0/A:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:58,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742228_1404 (size=39749) 2024-12-09T17:22:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:22:58,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:58,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765038304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765038306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765038306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765038307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,345 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:58,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:22:58,493 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/9e58eb97728445cc9e8ded1092166fee 2024-12-09T17:22:58,497 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/fdb541b0f9e24d3ebf7fd783416be3e9 is 50, key is test_row_0/B:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:58,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:58,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742229_1405 (size=12151) 2024-12-09T17:22:58,650 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:58,803 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:22:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765038809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765038811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765038812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:58,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765038814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/fdb541b0f9e24d3ebf7fd783416be3e9 2024-12-09T17:22:58,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7d6d39af999b4f78a76f8b533803a413 is 50, key is test_row_0/C:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:58,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742230_1406 (size=12151) 2024-12-09T17:22:58,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:58,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:58,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:58,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:58,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:22:59,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,260 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:59,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:59,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:22:59,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7d6d39af999b4f78a76f8b533803a413 2024-12-09T17:22:59,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/9e58eb97728445cc9e8ded1092166fee as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee 2024-12-09T17:22:59,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee, entries=200, sequenceid=196, filesize=38.8 K 2024-12-09T17:22:59,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/fdb541b0f9e24d3ebf7fd783416be3e9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9 2024-12-09T17:22:59,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9, entries=150, 
sequenceid=196, filesize=11.9 K 2024-12-09T17:22:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/7d6d39af999b4f78a76f8b533803a413 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413 2024-12-09T17:22:59,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413, entries=150, sequenceid=196, filesize=11.9 K 2024-12-09T17:22:59,326 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 5a7680059af346aa87c054fb00a90c2f in 1649ms, sequenceid=196, compaction requested=true 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:22:59,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133372 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] 
regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/B is initiating minor compaction (all files) 2024-12-09T17:22:59,326 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/A is initiating minor compaction (all files) 2024-12-09T17:22:59,327 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/B in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,327 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/A in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,327 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/138478ad04074c199bfb0858091e0e14, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=47.8 K 2024-12-09T17:22:59,327 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=130.2 K 2024-12-09T17:22:59,327 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee] 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 138478ad04074c199bfb0858091e0e14, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971116 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f72e0a7c4714825918dcafd0157e736, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971116 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ea1591811b2410f9209c401947b7c98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764972252 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a91bacf666ed4bfc9ddc9ac435010f58, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764972252 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d886cb436fe4725822756478406351e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733764974400 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d1703b260e84717820eedd592db5537, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733764974400 2024-12-09T17:22:59,327 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting fdb541b0f9e24d3ebf7fd783416be3e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:22:59,328 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e58eb97728445cc9e8ded1092166fee, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:22:59,333 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:59,334 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#B#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:59,334 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/1038672af55546bab81e0ae18b7ae292 is 50, key is test_row_0/B:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:59,337 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209b647d44b6b7045a685ac46a195a7164a_5a7680059af346aa87c054fb00a90c2f store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:59,338 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209b647d44b6b7045a685ac46a195a7164a_5a7680059af346aa87c054fb00a90c2f, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:59,338 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b647d44b6b7045a685ac46a195a7164a_5a7680059af346aa87c054fb00a90c2f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:59,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742232_1408 (size=4469) 2024-12-09T17:22:59,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742231_1407 (size=12595) 2024-12-09T17:22:59,351 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#A#compaction#343 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:59,351 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2f978fa1466747dbb2e48a1f9c0664b7 is 175, key is test_row_0/A:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:59,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742233_1409 (size=31549) 2024-12-09T17:22:59,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-09T17:22:59,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:22:59,412 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:22:59,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:22:59,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:59,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:22:59,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:59,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:22:59,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:22:59,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209929ce162bd854ac094754c51740f957c_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764977693/Put/seqid=0 2024-12-09T17:22:59,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742234_1410 (size=12304) 2024-12-09T17:22:59,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:59,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. as already flushing 2024-12-09T17:22:59,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765039733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,753 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/1038672af55546bab81e0ae18b7ae292 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1038672af55546bab81e0ae18b7ae292 2024-12-09T17:22:59,757 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/B of 5a7680059af346aa87c054fb00a90c2f into 1038672af55546bab81e0ae18b7ae292(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:22:59,757 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:59,757 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/B, priority=12, startTime=1733764979326; duration=0sec 2024-12-09T17:22:59,757 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:22:59,757 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:B 2024-12-09T17:22:59,757 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T17:22:59,758 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-09T17:22:59,758 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/C is initiating minor compaction (all files) 2024-12-09T17:22:59,758 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/C in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:22:59,758 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/a31284b490d7406580a2f539b4607485, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=47.8 K 2024-12-09T17:22:59,758 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a31284b490d7406580a2f539b4607485, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733764971116 2024-12-09T17:22:59,759 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d9c0ff474f1945a689a9435b51635db0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764972252 2024-12-09T17:22:59,759 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 227ac1824c164a6489065788ec4659b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=174, earliestPutTs=1733764974400 2024-12-09T17:22:59,759 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d6d39af999b4f78a76f8b533803a413, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:22:59,767 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#C#compaction#345 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:22:59,767 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2f978fa1466747dbb2e48a1f9c0664b7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7 2024-12-09T17:22:59,767 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/dfcda29176f44992ac8d510a5a1ad35e is 50, key is test_row_0/C:col10/1733764976555/Put/seqid=0 2024-12-09T17:22:59,772 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/A of 5a7680059af346aa87c054fb00a90c2f into 2f978fa1466747dbb2e48a1f9c0664b7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:22:59,772 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:22:59,772 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/A, priority=12, startTime=1733764979326; duration=0sec 2024-12-09T17:22:59,772 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:22:59,772 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:A 2024-12-09T17:22:59,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742235_1411 (size=12595) 2024-12-09T17:22:59,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765039814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765039815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765039817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:22:59,824 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209929ce162bd854ac094754c51740f957c_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209929ce162bd854ac094754c51740f957c_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:22:59,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/f531115cab24457f9a684c9293337121, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:22:59,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/f531115cab24457f9a684c9293337121 is 175, key is test_row_0/A:col10/1733764977693/Put/seqid=0 2024-12-09T17:22:59,828 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742236_1412 (size=31105) 2024-12-09T17:22:59,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765039827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:22:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765039838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:22:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:23:00,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765040042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:00,184 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/dfcda29176f44992ac8d510a5a1ad35e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/dfcda29176f44992ac8d510a5a1ad35e 2024-12-09T17:23:00,187 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/C of 5a7680059af346aa87c054fb00a90c2f into dfcda29176f44992ac8d510a5a1ad35e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:00,187 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:00,187 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/C, priority=12, startTime=1733764979326; duration=0sec 2024-12-09T17:23:00,187 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:00,187 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:C 2024-12-09T17:23:00,229 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/f531115cab24457f9a684c9293337121 2024-12-09T17:23:00,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/de37a40fa59e41dca7ed2eb7154870ce is 50, key is test_row_0/B:col10/1733764977693/Put/seqid=0 2024-12-09T17:23:00,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742237_1413 (size=12151) 2024-12-09T17:23:00,237 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/de37a40fa59e41dca7ed2eb7154870ce 2024-12-09T17:23:00,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d073371e10b0441a84d2ce8eda9b6f93 is 50, key is test_row_0/C:col10/1733764977693/Put/seqid=0 2024-12-09T17:23:00,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742238_1414 (size=12151) 2024-12-09T17:23:00,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:00,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765040348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:00,650 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d073371e10b0441a84d2ce8eda9b6f93 2024-12-09T17:23:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/f531115cab24457f9a684c9293337121 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121 2024-12-09T17:23:00,656 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121, entries=150, sequenceid=210, filesize=30.4 K 2024-12-09T17:23:00,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/de37a40fa59e41dca7ed2eb7154870ce as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce 2024-12-09T17:23:00,659 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce, entries=150, sequenceid=210, filesize=11.9 K 2024-12-09T17:23:00,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d073371e10b0441a84d2ce8eda9b6f93 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93 2024-12-09T17:23:00,662 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93, entries=150, sequenceid=210, filesize=11.9 K 2024-12-09T17:23:00,663 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5a7680059af346aa87c054fb00a90c2f in 1250ms, sequenceid=210, compaction requested=false 2024-12-09T17:23:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:23:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-09T17:23:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-09T17:23:00,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-09T17:23:00,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7750 sec 2024-12-09T17:23:00,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.7780 sec 2024-12-09T17:23:00,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:00,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:23:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:00,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b74d64017755465a95e2d5883266acaa_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:00,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742239_1415 (size=14794) 2024-12-09T17:23:00,865 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:00,867 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209b74d64017755465a95e2d5883266acaa_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b74d64017755465a95e2d5883266acaa_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:00,868 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2405f666c3884e1a8fd85aaf47ce52b3, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:00,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2405f666c3884e1a8fd85aaf47ce52b3 is 175, key is test_row_0/A:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:00,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742240_1416 (size=39749) 2024-12-09T17:23:00,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:00,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765040896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765041002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765041206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,271 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2405f666c3884e1a8fd85aaf47ce52b3 2024-12-09T17:23:01,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/1e4273a123d34c37b4cf57e4a6124a29 is 50, key is test_row_0/B:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:01,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742241_1417 (size=12151) 2024-12-09T17:23:01,357 DEBUG [Thread-1610 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5910b8c7 to 127.0.0.1:54326 2024-12-09T17:23:01,357 DEBUG [Thread-1610 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:01,358 DEBUG [Thread-1616 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x16722a1f to 127.0.0.1:54326 2024-12-09T17:23:01,358 DEBUG [Thread-1616 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:01,361 DEBUG [Thread-1614 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b6d860 to 127.0.0.1:54326 2024-12-09T17:23:01,361 DEBUG [Thread-1614 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:01,361 DEBUG [Thread-1612 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x003f9a05 to 127.0.0.1:54326 2024-12-09T17:23:01,361 DEBUG [Thread-1612 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:01,362 DEBUG [Thread-1608 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42af2962 to 127.0.0.1:54326 2024-12-09T17:23:01,362 DEBUG [Thread-1608 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:01,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765041510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/1e4273a123d34c37b4cf57e4a6124a29 2024-12-09T17:23:01,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/3b610cbd20d444448a5b0645d5334344 is 50, key is test_row_0/C:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:01,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742242_1418 (size=12151) 2024-12-09T17:23:01,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42864 deadline: 1733765041830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,833 DEBUG [Thread-1605 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:01,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42854 deadline: 1733765041835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,837 DEBUG [Thread-1599 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:01,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42876 deadline: 1733765041838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,839 DEBUG [Thread-1601 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:01,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42882 deadline: 1733765041840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:01,841 DEBUG [Thread-1603 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:01,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-09T17:23:01,995 INFO [Thread-1607 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-09T17:23:02,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:02,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42880 deadline: 1733765042014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:02,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/3b610cbd20d444448a5b0645d5334344 2024-12-09T17:23:02,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/2405f666c3884e1a8fd85aaf47ce52b3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3 2024-12-09T17:23:02,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3, entries=200, sequenceid=236, filesize=38.8 K 2024-12-09T17:23:02,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/1e4273a123d34c37b4cf57e4a6124a29 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29 2024-12-09T17:23:02,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29, entries=150, sequenceid=236, filesize=11.9 K 2024-12-09T17:23:02,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/3b610cbd20d444448a5b0645d5334344 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344 2024-12-09T17:23:02,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344, entries=150, sequenceid=236, filesize=11.9 K 2024-12-09T17:23:02,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a7680059af346aa87c054fb00a90c2f in 1258ms, sequenceid=236, compaction requested=true 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:02,114 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:02,114 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactSplit(403): Add compact mark for store 5a7680059af346aa87c054fb00a90c2f:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:02,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/A is initiating minor compaction (all files) 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/B is initiating minor compaction (all files) 2024-12-09T17:23:02,115 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/B in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:23:02,115 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/A in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:23:02,115 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=100.0 K 2024-12-09T17:23:02,115 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1038672af55546bab81e0ae18b7ae292, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=36.0 K 2024-12-09T17:23:02,115 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3] 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1038672af55546bab81e0ae18b7ae292, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f978fa1466747dbb2e48a1f9c0664b7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting de37a40fa59e41dca7ed2eb7154870ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733764977691 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f531115cab24457f9a684c9293337121, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733764977691 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e4273a123d34c37b4cf57e4a6124a29, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764979713 2024-12-09T17:23:02,115 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2405f666c3884e1a8fd85aaf47ce52b3, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764979701 2024-12-09T17:23:02,120 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:02,121 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:02,121 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/fe914bd93cf04c4a9b9572c9dd6c0e2a is 50, key is test_row_0/B:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:02,121 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209eb0ed0ed20b24621a3153986ae43e8cf_5a7680059af346aa87c054fb00a90c2f store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:02,123 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209eb0ed0ed20b24621a3153986ae43e8cf_5a7680059af346aa87c054fb00a90c2f, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:02,123 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209eb0ed0ed20b24621a3153986ae43e8cf_5a7680059af346aa87c054fb00a90c2f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742243_1419 (size=12697) 2024-12-09T17:23:02,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742244_1420 (size=4469) 2024-12-09T17:23:02,528 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#A#compaction#352 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:02,530 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/583e39c648e64944b37ff48636b40ac5 is 175, key is test_row_0/A:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:02,534 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/fe914bd93cf04c4a9b9572c9dd6c0e2a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fe914bd93cf04c4a9b9572c9dd6c0e2a 2024-12-09T17:23:02,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742245_1421 (size=31651) 2024-12-09T17:23:02,538 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/B of 5a7680059af346aa87c054fb00a90c2f into fe914bd93cf04c4a9b9572c9dd6c0e2a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:02,538 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:02,538 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/B, priority=13, startTime=1733764982114; duration=0sec 2024-12-09T17:23:02,538 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:02,538 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:B 2024-12-09T17:23:02,538 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:02,539 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:02,539 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 5a7680059af346aa87c054fb00a90c2f/C is initiating minor compaction (all files) 2024-12-09T17:23:02,540 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a7680059af346aa87c054fb00a90c2f/C in TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:23:02,540 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/dfcda29176f44992ac8d510a5a1ad35e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp, totalSize=36.0 K 2024-12-09T17:23:02,540 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting dfcda29176f44992ac8d510a5a1ad35e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733764976555 2024-12-09T17:23:02,540 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d073371e10b0441a84d2ce8eda9b6f93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733764977691 2024-12-09T17:23:02,541 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b610cbd20d444448a5b0645d5334344, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733764979713 2024-12-09T17:23:02,547 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a7680059af346aa87c054fb00a90c2f#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:02,547 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/663c20e589e54a5194dbfca17e9cc951 is 50, key is test_row_0/C:col10/1733764980855/Put/seqid=0 2024-12-09T17:23:02,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742246_1422 (size=12697) 2024-12-09T17:23:02,946 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/583e39c648e64944b37ff48636b40ac5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/583e39c648e64944b37ff48636b40ac5 2024-12-09T17:23:02,953 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/A of 5a7680059af346aa87c054fb00a90c2f into 583e39c648e64944b37ff48636b40ac5(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:02,953 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:02,953 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/A, priority=13, startTime=1733764982114; duration=0sec 2024-12-09T17:23:02,953 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:02,953 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:A 2024-12-09T17:23:02,956 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/663c20e589e54a5194dbfca17e9cc951 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/663c20e589e54a5194dbfca17e9cc951 2024-12-09T17:23:02,960 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a7680059af346aa87c054fb00a90c2f/C of 5a7680059af346aa87c054fb00a90c2f into 663c20e589e54a5194dbfca17e9cc951(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:02,960 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:02,960 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f., storeName=5a7680059af346aa87c054fb00a90c2f/C, priority=13, startTime=1733764982114; duration=0sec 2024-12-09T17:23:02,960 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:02,960 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a7680059af346aa87c054fb00a90c2f:C 2024-12-09T17:23:03,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:03,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:23:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:23:03,024 DEBUG [Thread-1597 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x033feebb to 127.0.0.1:54326 2024-12-09T17:23:03,024 DEBUG [Thread-1597 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:03,024 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:23:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:23:03,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:03,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412096c031ea7444c467697623dbe6fcf9b26_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764980895/Put/seqid=0 2024-12-09T17:23:03,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742247_1423 (size=12304) 2024-12-09T17:23:03,437 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:03,444 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412096c031ea7444c467697623dbe6fcf9b26_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c031ea7444c467697623dbe6fcf9b26_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:03,446 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c792c3d6a5684edf8016b47cafc0921c, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:03,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c792c3d6a5684edf8016b47cafc0921c is 175, key is test_row_0/A:col10/1733764980895/Put/seqid=0 2024-12-09T17:23:03,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742248_1424 (size=31105) 2024-12-09T17:23:03,853 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c792c3d6a5684edf8016b47cafc0921c 2024-12-09T17:23:03,857 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T17:23:03,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b4626b164a5440d5a7c339f795b92461 is 50, key is test_row_0/B:col10/1733764980895/Put/seqid=0 2024-12-09T17:23:03,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742249_1425 (size=12151) 2024-12-09T17:23:04,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b4626b164a5440d5a7c339f795b92461 2024-12-09T17:23:04,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d409289661bb449b8661cafae8611e15 is 50, key is test_row_0/C:col10/1733764980895/Put/seqid=0 2024-12-09T17:23:04,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742250_1426 (size=12151) 2024-12-09T17:23:04,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d409289661bb449b8661cafae8611e15 2024-12-09T17:23:04,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c792c3d6a5684edf8016b47cafc0921c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c792c3d6a5684edf8016b47cafc0921c 2024-12-09T17:23:04,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c792c3d6a5684edf8016b47cafc0921c, entries=150, sequenceid=253, filesize=30.4 K 2024-12-09T17:23:04,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/b4626b164a5440d5a7c339f795b92461 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b4626b164a5440d5a7c339f795b92461 2024-12-09T17:23:04,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b4626b164a5440d5a7c339f795b92461, entries=150, sequenceid=253, filesize=11.9 K 2024-12-09T17:23:04,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/d409289661bb449b8661cafae8611e15 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d409289661bb449b8661cafae8611e15 2024-12-09T17:23:04,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d409289661bb449b8661cafae8611e15, entries=150, sequenceid=253, filesize=11.9 K 2024-12-09T17:23:04,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 5a7680059af346aa87c054fb00a90c2f in 1690ms, sequenceid=253, compaction requested=false 2024-12-09T17:23:04,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:05,851 DEBUG [Thread-1599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c40db2e to 127.0.0.1:54326 2024-12-09T17:23:05,851 DEBUG [Thread-1599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:05,857 DEBUG [Thread-1603 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3401188a to 127.0.0.1:54326 2024-12-09T17:23:05,857 DEBUG [Thread-1603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:05,870 DEBUG [Thread-1605 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x55650656 to 127.0.0.1:54326 2024-12-09T17:23:05,870 DEBUG [Thread-1605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:05,878 DEBUG [Thread-1601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a86cb71 to 127.0.0.1:54326 2024-12-09T17:23:05,878 DEBUG [Thread-1601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:05,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-09T17:23:05,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-09T17:23:05,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-09T17:23:05,878 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3277 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9831 rows 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3259 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9771 rows 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3263 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9788 rows 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3237 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9707 rows 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3273 2024-12-09T17:23:05,879 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9818 rows 2024-12-09T17:23:05,879 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T17:23:05,879 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dacfd49 to 127.0.0.1:54326 2024-12-09T17:23:05,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:05,884 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-09T17:23:05,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-09T17:23:05,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:05,888 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764985888"}]},"ts":"1733764985888"} 2024-12-09T17:23:05,889 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-09T17:23:05,919 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-09T17:23:05,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:23:05,921 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, UNASSIGN}] 2024-12-09T17:23:05,922 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, UNASSIGN 2024-12-09T17:23:05,922 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:05,923 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:23:05,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:05,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:06,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:06,076 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:06,076 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:23:06,076 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing 5a7680059af346aa87c054fb00a90c2f, disabling compactions & flushes 2024-12-09T17:23:06,076 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:23:06,076 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 2024-12-09T17:23:06,076 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. after waiting 0 ms 2024-12-09T17:23:06,076 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
2024-12-09T17:23:06,077 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing 5a7680059af346aa87c054fb00a90c2f 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-09T17:23:06,077 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=A 2024-12-09T17:23:06,077 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:06,078 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=B 2024-12-09T17:23:06,078 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:06,078 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a7680059af346aa87c054fb00a90c2f, store=C 2024-12-09T17:23:06,078 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:06,087 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cb7cc4299e77423083884b95a9455e33_5a7680059af346aa87c054fb00a90c2f is 50, key is test_row_0/A:col10/1733764985876/Put/seqid=0 2024-12-09T17:23:06,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742251_1427 (size=9914) 2024-12-09T17:23:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:06,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:06,492 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:06,502 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209cb7cc4299e77423083884b95a9455e33_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cb7cc4299e77423083884b95a9455e33_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:06,503 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c99f148b79d94cc3a1b49863e1813268, store: [table=TestAcidGuarantees family=A region=5a7680059af346aa87c054fb00a90c2f] 2024-12-09T17:23:06,504 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c99f148b79d94cc3a1b49863e1813268 is 175, key is test_row_0/A:col10/1733764985876/Put/seqid=0 2024-12-09T17:23:06,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742252_1428 (size=22561) 2024-12-09T17:23:06,910 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c99f148b79d94cc3a1b49863e1813268 2024-12-09T17:23:06,922 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/929986dc8032462ab42365319e42c48e is 50, key is test_row_0/B:col10/1733764985876/Put/seqid=0 2024-12-09T17:23:06,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742253_1429 (size=9857) 2024-12-09T17:23:06,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:07,327 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/929986dc8032462ab42365319e42c48e 2024-12-09T17:23:07,341 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/359c9fe764fe4c2b98e0a33c1fd84fe8 is 50, key is test_row_0/C:col10/1733764985876/Put/seqid=0 2024-12-09T17:23:07,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742254_1430 (size=9857) 2024-12-09T17:23:07,747 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/359c9fe764fe4c2b98e0a33c1fd84fe8 2024-12-09T17:23:07,757 DEBUG 
[RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/A/c99f148b79d94cc3a1b49863e1813268 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c99f148b79d94cc3a1b49863e1813268 2024-12-09T17:23:07,761 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c99f148b79d94cc3a1b49863e1813268, entries=100, sequenceid=260, filesize=22.0 K 2024-12-09T17:23:07,762 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/B/929986dc8032462ab42365319e42c48e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/929986dc8032462ab42365319e42c48e 2024-12-09T17:23:07,767 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/929986dc8032462ab42365319e42c48e, entries=100, sequenceid=260, filesize=9.6 K 2024-12-09T17:23:07,768 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/.tmp/C/359c9fe764fe4c2b98e0a33c1fd84fe8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/359c9fe764fe4c2b98e0a33c1fd84fe8 2024-12-09T17:23:07,771 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/359c9fe764fe4c2b98e0a33c1fd84fe8, entries=100, sequenceid=260, filesize=9.6 K 2024-12-09T17:23:07,772 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 5a7680059af346aa87c054fb00a90c2f in 1696ms, sequenceid=260, compaction requested=true 2024-12-09T17:23:07,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3] to archive 2024-12-09T17:23:07,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:23:07,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/e8b0efd21e3b4a11a476f845882ebfe0 2024-12-09T17:23:07,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a3c3e33b08804e67976e53488e45b490 2024-12-09T17:23:07,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/be363441ace6414a8dcf43e95c7d6400 2024-12-09T17:23:07,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/7f7a0093db8b4e6494fa81fe8cb31bf7 2024-12-09T17:23:07,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a1adaa7649184e69b82847f6ff84e50e 2024-12-09T17:23:07,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/cc21a649e9464829a3500bcb38c90d7b 2024-12-09T17:23:07,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/21f1e1fe87fc4a5f95ffdfaea3e07aec 2024-12-09T17:23:07,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/527fbf220e82427e895fa4eb7c95f0ff 2024-12-09T17:23:07,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c493e974a3b64fd4a980f8a439462312 2024-12-09T17:23:07,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/4f72e0a7c4714825918dcafd0157e736 2024-12-09T17:23:07,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/a91bacf666ed4bfc9ddc9ac435010f58 2024-12-09T17:23:07,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/6d1703b260e84717820eedd592db5537 2024-12-09T17:23:07,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/9e58eb97728445cc9e8ded1092166fee 2024-12-09T17:23:07,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2f978fa1466747dbb2e48a1f9c0664b7 2024-12-09T17:23:07,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/f531115cab24457f9a684c9293337121 2024-12-09T17:23:07,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/2405f666c3884e1a8fd85aaf47ce52b3 2024-12-09T17:23:07,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/56700c817ab845339f29fef2b175301b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/d545279225004b66a9df355f05a4ef8c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/138478ad04074c199bfb0858091e0e14, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1038672af55546bab81e0ae18b7ae292, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29] to archive 2024-12-09T17:23:07,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:23:07,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b6a924e1b9eb4782921000f511243274 2024-12-09T17:23:07,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/a7f3de00433142338f86410438756450 2024-12-09T17:23:07,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/56700c817ab845339f29fef2b175301b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/56700c817ab845339f29fef2b175301b 2024-12-09T17:23:07,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4a05a2031eb54ed9a36df2e705fe7d44 2024-12-09T17:23:07,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/c9e824c3f90e48758852ad8ff594b2e2 2024-12-09T17:23:07,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/d545279225004b66a9df355f05a4ef8c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/d545279225004b66a9df355f05a4ef8c 2024-12-09T17:23:07,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/f511895c85924ad092a630033472354b 2024-12-09T17:23:07,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b9411dfd0f8341b781a66c989c1e21a1 2024-12-09T17:23:07,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/138478ad04074c199bfb0858091e0e14 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/138478ad04074c199bfb0858091e0e14 2024-12-09T17:23:07,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/9c7bc3af9961459d8f20dbfdd9eb8758 2024-12-09T17:23:07,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/4ea1591811b2410f9209c401947b7c98 2024-12-09T17:23:07,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/3d886cb436fe4725822756478406351e 2024-12-09T17:23:07,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1038672af55546bab81e0ae18b7ae292 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1038672af55546bab81e0ae18b7ae292 2024-12-09T17:23:07,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fdb541b0f9e24d3ebf7fd783416be3e9 2024-12-09T17:23:07,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/de37a40fa59e41dca7ed2eb7154870ce 2024-12-09T17:23:07,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/1e4273a123d34c37b4cf57e4a6124a29 2024-12-09T17:23:07,802 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/2bd3a5224a504185aeab8efca2e8b7a9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/e3b9cd93b9ec405481db9d8d82fc1234, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/a31284b490d7406580a2f539b4607485, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/dfcda29176f44992ac8d510a5a1ad35e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344] to archive 2024-12-09T17:23:07,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:23:07,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/51ae0f2409574964b24a423e3e290ee6 2024-12-09T17:23:07,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/13355335a79b4b999b236dac286f74b8 2024-12-09T17:23:07,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/2bd3a5224a504185aeab8efca2e8b7a9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/2bd3a5224a504185aeab8efca2e8b7a9 2024-12-09T17:23:07,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/bc33f64bdf6b4ae3975a80af872abad5 2024-12-09T17:23:07,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/5af132d1e3ff4dcf96aec2848c2c1790 2024-12-09T17:23:07,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/e3b9cd93b9ec405481db9d8d82fc1234 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/e3b9cd93b9ec405481db9d8d82fc1234 2024-12-09T17:23:07,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7c111ff48da64c489f38d7edbdf6e281 2024-12-09T17:23:07,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/9d24da3589424c0b8830a91632556a7a 2024-12-09T17:23:07,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/a31284b490d7406580a2f539b4607485 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/a31284b490d7406580a2f539b4607485 2024-12-09T17:23:07,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/b02ecaab47e243dcadc0109325b33181 2024-12-09T17:23:07,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d9c0ff474f1945a689a9435b51635db0 2024-12-09T17:23:07,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/227ac1824c164a6489065788ec4659b4 2024-12-09T17:23:07,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/dfcda29176f44992ac8d510a5a1ad35e to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/dfcda29176f44992ac8d510a5a1ad35e 2024-12-09T17:23:07,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/7d6d39af999b4f78a76f8b533803a413 2024-12-09T17:23:07,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d073371e10b0441a84d2ce8eda9b6f93 2024-12-09T17:23:07,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/3b610cbd20d444448a5b0645d5334344 2024-12-09T17:23:07,817 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits/263.seqid, newMaxSeqId=263, maxSeqId=4 2024-12-09T17:23:07,817 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f. 
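The StoreCloser entries above show backup.HFileArchiver moving each compacted store file from the region's data directory to the mirrored path under archive/ instead of deleting it outright. Below is a minimal, illustrative sketch of that move using only the public Hadoop FileSystem API; the <region> and <storefile> path segments are placeholders, and the real archiver additionally handles name collisions, retries, and whole-directory moves.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root taken from the log above; <region>/<storefile> are hypothetical placeholders.
    Path root = new Path("hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4");
    FileSystem fs = FileSystem.get(root.toUri(), conf);

    // A store file lives under data/<ns>/<table>/<region>/<family>/ and is archived
    // to the same relative path under archive/ rather than being deleted.
    Path src = new Path(root, "data/default/TestAcidGuarantees/<region>/A/<storefile>");
    Path dst = new Path(root, "archive/data/default/TestAcidGuarantees/<region>/A/<storefile>");

    fs.mkdirs(dst.getParent());           // the archive family directory must exist first
    boolean moved = fs.rename(src, dst);  // logged above as "Archived from FileableStoreFile, <src> to <dst>"
    System.out.println("archived: " + moved);
  }
}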
2024-12-09T17:23:07,817 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for 5a7680059af346aa87c054fb00a90c2f: 2024-12-09T17:23:07,818 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed 5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:07,818 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=5a7680059af346aa87c054fb00a90c2f, regionState=CLOSED 2024-12-09T17:23:07,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-09T17:23:07,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure 5a7680059af346aa87c054fb00a90c2f, server=80c69eb3c456,42927,1733764865379 in 1.8960 sec 2024-12-09T17:23:07,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-09T17:23:07,821 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a7680059af346aa87c054fb00a90c2f, UNASSIGN in 1.8990 sec 2024-12-09T17:23:07,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-09T17:23:07,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9010 sec 2024-12-09T17:23:07,822 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764987822"}]},"ts":"1733764987822"} 2024-12-09T17:23:07,822 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:23:07,867 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:23:07,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9820 sec 2024-12-09T17:23:07,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-09T17:23:07,996 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-09T17:23:07,997 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:23:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,000 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,001 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,001 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-09T17:23:08,004 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,008 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits] 2024-12-09T17:23:08,013 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/583e39c648e64944b37ff48636b40ac5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/583e39c648e64944b37ff48636b40ac5 2024-12-09T17:23:08,016 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c792c3d6a5684edf8016b47cafc0921c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c792c3d6a5684edf8016b47cafc0921c 2024-12-09T17:23:08,018 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c99f148b79d94cc3a1b49863e1813268 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/A/c99f148b79d94cc3a1b49863e1813268 2024-12-09T17:23:08,020 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/929986dc8032462ab42365319e42c48e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/929986dc8032462ab42365319e42c48e 2024-12-09T17:23:08,021 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b4626b164a5440d5a7c339f795b92461 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/b4626b164a5440d5a7c339f795b92461 
2024-12-09T17:23:08,022 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fe914bd93cf04c4a9b9572c9dd6c0e2a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/B/fe914bd93cf04c4a9b9572c9dd6c0e2a 2024-12-09T17:23:08,024 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/359c9fe764fe4c2b98e0a33c1fd84fe8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/359c9fe764fe4c2b98e0a33c1fd84fe8 2024-12-09T17:23:08,024 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/663c20e589e54a5194dbfca17e9cc951 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/663c20e589e54a5194dbfca17e9cc951 2024-12-09T17:23:08,025 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d409289661bb449b8661cafae8611e15 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/C/d409289661bb449b8661cafae8611e15 2024-12-09T17:23:08,027 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits/263.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f/recovered.edits/263.seqid 2024-12-09T17:23:08,027 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,028 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:23:08,028 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:23:08,029 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-09T17:23:08,031 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412091c0f7a76f04540b8a649735312bcda73_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412091c0f7a76f04540b8a649735312bcda73_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,031 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966d08858743a49428da15bb6250e04f5_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966d08858743a49428da15bb6250e04f5_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,032 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c031ea7444c467697623dbe6fcf9b26_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c031ea7444c467697623dbe6fcf9b26_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,033 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c26c4bcfd994a86ab5168379166ff23_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412096c26c4bcfd994a86ab5168379166ff23_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,033 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209929ce162bd854ac094754c51740f957c_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209929ce162bd854ac094754c51740f957c_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,034 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209987c77a135f447d6b8952bc3b9e0b831_5a7680059af346aa87c054fb00a90c2f to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209987c77a135f447d6b8952bc3b9e0b831_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,034 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b0575e258ecb43cf8f8e547d01bd83c3_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b0575e258ecb43cf8f8e547d01bd83c3_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,035 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b74d64017755465a95e2d5883266acaa_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b74d64017755465a95e2d5883266acaa_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,036 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b8f5311e93f246a4827ec3bdfa76d5e4_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b8f5311e93f246a4827ec3bdfa76d5e4_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,037 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b9a8ddac7650465890c2e47e11556021_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209b9a8ddac7650465890c2e47e11556021_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,037 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c5b41d2c73764776b3eedc44d62dd5dd_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c5b41d2c73764776b3eedc44d62dd5dd_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,038 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cb7cc4299e77423083884b95a9455e33_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209cb7cc4299e77423083884b95a9455e33_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,038 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209dfc8dbd36e1443c7959346c120af85e6_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209dfc8dbd36e1443c7959346c120af85e6_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,039 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f519859bd2404636b7cd480bb79a9d80_5a7680059af346aa87c054fb00a90c2f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f519859bd2404636b7cd480bb79a9d80_5a7680059af346aa87c054fb00a90c2f 2024-12-09T17:23:08,039 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:23:08,041 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,042 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:23:08,043 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:23:08,044 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,044 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-12-09T17:23:08,044 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733764988044"}]},"ts":"9223372036854775807"} 2024-12-09T17:23:08,045 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:23:08,045 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5a7680059af346aa87c054fb00a90c2f, NAME => 'TestAcidGuarantees,,1733764958104.5a7680059af346aa87c054fb00a90c2f.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:23:08,045 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-09T17:23:08,046 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733764988045"}]},"ts":"9223372036854775807"} 2024-12-09T17:23:08,047 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:23:08,053 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 56 msec 2024-12-09T17:23:08,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-09T17:23:08,103 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-09T17:23:08,116 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237 (was 237), OpenFileDescriptor=452 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=294 (was 320), ProcessCount=11 (was 11), AvailableMemoryMB=4225 (was 4223) - AvailableMemoryMB LEAK? - 2024-12-09T17:23:08,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=237, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=294, ProcessCount=11, AvailableMemoryMB=4225 2024-12-09T17:23:08,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
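On the client side, the DISABLE (procId 120) and DELETE (procId 124) operations tracked by the procedure entries above correspond to two Admin calls. The following is a minimal sketch against the HBase 2.x client API, assuming a reachable cluster configuration; it is illustrative only and not the test harness's actual teardown code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);  // master runs DisableTableProcedure: regions are unassigned and closed
        }
        admin.deleteTable(tn);     // master runs DeleteTableProcedure: region dirs archived, meta rows removed
      }
    }
  }
}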
2024-12-09T17:23:08,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:23:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:08,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:23:08,129 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:08,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-12-09T17:23:08,130 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:23:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:08,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742255_1431 (size=963) 2024-12-09T17:23:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:08,540 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:23:08,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742256_1432 (size=53) 2024-12-09T17:23:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:08,950 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:23:08,951 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a1b1655d3c492fdd18da414b0bd9edbd, disabling compactions & flushes 2024-12-09T17:23:08,951 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:08,951 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:08,951 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. after waiting 0 ms 2024-12-09T17:23:08,951 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:08,951 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
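The create request logged at 17:23:08,127 is shown in shell-style descriptor syntax; the equivalent Java Admin call builds a TableDescriptor carrying the ADAPTIVE compacting-memstore attribute and the three column families. A minimal sketch with the HBase 2.x builder API follows; only the family attributes with obvious builder setters (versions, block size, bloom filter) are mapped here, the remaining logged attributes are left at their defaults.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // table-level metadata selecting the ADAPTIVE compacting memstore, as in the logged descriptor
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .build());
      }
      admin.createTable(table.build());
    }
  }
}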
2024-12-09T17:23:08,951 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:08,953 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:23:08,954 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733764988953"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733764988953"}]},"ts":"1733764988953"} 2024-12-09T17:23:08,956 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:23:08,958 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:23:08,958 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764988958"}]},"ts":"1733764988958"} 2024-12-09T17:23:08,960 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:23:09,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, ASSIGN}] 2024-12-09T17:23:09,013 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, ASSIGN 2024-12-09T17:23:09,013 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:23:09,164 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=a1b1655d3c492fdd18da414b0bd9edbd, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:09,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:09,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:09,322 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:09,328 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
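[Editor's note] The repeated "Checking to see if procedure is done pid=125" entries are the master answering the client's polling while it waits for the create procedure to finish; the later HBaseAdmin$TableFuture entry ("procId: 125 completed") marks the end of that wait. A hedged sketch of that client-side wait, assuming the standard HBase 2.x Admin API and reusing the admin/descriptor objects from the previous sketch:

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class CreateTableWait {                                    // illustrative helper, not from the test
      static void createAndWait(Admin admin, TableDescriptor descriptor) throws Exception {
        Future<Void> pending = admin.createTableAsync(descriptor);   // master stores the CreateTableProcedure
        pending.get();                                               // client polls the master until the procedure completes
        if (!admin.isTableAvailable(descriptor.getTableName())) {
          throw new IllegalStateException("table not yet available");
        }
      }
    }
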
2024-12-09T17:23:09,328 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:23:09,329 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,329 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:23:09,329 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,329 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,333 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,335 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:09,335 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1b1655d3c492fdd18da414b0bd9edbd columnFamilyName A 2024-12-09T17:23:09,336 DEBUG [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:09,336 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(327): Store=a1b1655d3c492fdd18da414b0bd9edbd/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:09,336 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,338 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:09,338 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1b1655d3c492fdd18da414b0bd9edbd columnFamilyName B 2024-12-09T17:23:09,338 DEBUG [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:09,338 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(327): Store=a1b1655d3c492fdd18da414b0bd9edbd/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:09,339 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,340 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:09,340 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1b1655d3c492fdd18da414b0bd9edbd columnFamilyName C 2024-12-09T17:23:09,340 DEBUG [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:09,341 INFO [StoreOpener-a1b1655d3c492fdd18da414b0bd9edbd-1 {}] regionserver.HStore(327): Store=a1b1655d3c492fdd18da414b0bd9edbd/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:09,341 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:09,342 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,342 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,344 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:23:09,346 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:09,349 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:23:09,349 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened a1b1655d3c492fdd18da414b0bd9edbd; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66767633, jitterRate=-0.00508473813533783}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:23:09,350 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:09,351 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., pid=127, masterSystemTime=1733764989321 2024-12-09T17:23:09,353 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:09,354 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
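[Editor's note] The Store=.../A, /B and /C lines above show each column family backed by a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, which follows from the table-level 'hbase.hregion.compacting.memstore.type' attribute set at creation. The same policy can also be requested per column family; a minimal sketch of that alternative, assuming the HBase 2.x client API (the helper class and method name are placeholders):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class AdaptiveFamily {                                        // illustrative only
      // Per-family alternative to the table-level METADATA attribute used in this test.
      static ColumnFamilyDescriptor adaptive(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)     // matches compactor=ADAPTIVE in the log
            .setMaxVersions(1)
            .build();
      }
    }
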
2024-12-09T17:23:09,354 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=a1b1655d3c492fdd18da414b0bd9edbd, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:09,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-09T17:23:09,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 in 188 msec 2024-12-09T17:23:09,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-09T17:23:09,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, ASSIGN in 346 msec 2024-12-09T17:23:09,359 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:23:09,360 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733764989359"}]},"ts":"1733764989359"} 2024-12-09T17:23:09,361 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:23:09,370 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:23:09,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2420 sec 2024-12-09T17:23:10,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-12-09T17:23:10,238 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-12-09T17:23:10,239 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048087da to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59daaa82 2024-12-09T17:23:10,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aaa8c4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,289 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,291 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,293 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:23:10,295 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34932, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:23:10,297 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x345fa4f7 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38dd8644 2024-12-09T17:23:10,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@466b85c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,310 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x315a23ef to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65e17c26 2024-12-09T17:23:10,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3ee89e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,320 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d125972 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53fc02ba 2024-12-09T17:23:10,328 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0e6a43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x134bfe32 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2011d733 2024-12-09T17:23:10,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8e5fd00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,337 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b55f2f to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39b3baa5 2024-12-09T17:23:10,345 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e195d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x402e5def to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14088aa9 2024-12-09T17:23:10,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23090be3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,355 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10bda459 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40302925 2024-12-09T17:23:10,361 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8d64d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,362 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0657e1bf to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47ef9951 2024-12-09T17:23:10,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784d683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,370 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dee2855 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@567011a8 2024-12-09T17:23:10,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7761f52b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,379 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54e8a98a to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2430fee 2024-12-09T17:23:10,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a736a20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:10,389 DEBUG [hconnection-0x7222a51b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,389 DEBUG [hconnection-0xa302383-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,389 DEBUG [hconnection-0x555867b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:10,390 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,390 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:57846, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,390 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-09T17:23:10,391 DEBUG [hconnection-0x579a1b79-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:10,391 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:10,391 DEBUG [hconnection-0x7df8c3ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,392 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:10,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:10,392 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,392 DEBUG [hconnection-0x36ad98c4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,392 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,393 DEBUG [hconnection-0x54c5e52e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,394 DEBUG [hconnection-0x3165e266-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,394 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,394 DEBUG [hconnection-0x70b84fd0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,394 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,395 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-09T17:23:10,395 DEBUG [hconnection-0x211a9e92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:10,396 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57914, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,396 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:10,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:10,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:10,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765050408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765050408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765050409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765050409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765050409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/d999d8cb1a584c40bfad61ca232e1141 is 50, key is test_row_0/A:col10/1733764990396/Put/seqid=0 2024-12-09T17:23:10,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742257_1433 (size=12001) 2024-12-09T17:23:10,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:10,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765050510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765050510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765050510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765050510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765050510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,544 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:10,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:10,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:10,696 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765050713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765050713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765050714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765050714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765050715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/d999d8cb1a584c40bfad61ca232e1141 2024-12-09T17:23:10,848 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:10,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:10,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:10,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/5bca55b135764110a69ea5a1bf02624f is 50, key is test_row_0/B:col10/1733764990396/Put/seqid=0 2024-12-09T17:23:10,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742258_1434 (size=12001) 2024-12-09T17:23:10,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/5bca55b135764110a69ea5a1bf02624f 2024-12-09T17:23:10,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/6eb81226d5e144d791e8ae5548685a75 is 50, key is test_row_0/C:col10/1733764990396/Put/seqid=0 2024-12-09T17:23:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742259_1435 (size=12001) 2024-12-09T17:23:10,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:11,000 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:11,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:11,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:11,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
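
[Annotation] The entries above show the region's memstore at its blocking limit (512.0 K in this test run) while the pid=129 flush is still in progress, so concurrent Mutate calls against TestAcidGuarantees are rejected with RegionTooBusyException until the flush completes. The following is a minimal, illustrative sketch of how a writer against this table could back off and retry on that exception; it is not part of the test. The value payload, attempt count, and backoff are hypothetical, and it assumes RegionTooBusyException reaches the caller directly rather than being wrapped by the client's own internal retry layer.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutExample {
        // Hypothetical helper: retry a single Put when the region reports it is over its
        // memstore limit (RegionTooBusyException), sleeping briefly between attempts.
        static void putWithRetry(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e; // give up after maxAttempts
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff (assumed values)
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row and column names mirror those seen in the log; the value is made up.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithRetry(table, put, 5);
            }
        }
    }
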
2024-12-09T17:23:11,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765051018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765051018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765051018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765051020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765051020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:11,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:11,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:11,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:11,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:11,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/6eb81226d5e144d791e8ae5548685a75 2024-12-09T17:23:11,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/d999d8cb1a584c40bfad61ca232e1141 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141 2024-12-09T17:23:11,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141, entries=150, sequenceid=14, filesize=11.7 K 2024-12-09T17:23:11,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/5bca55b135764110a69ea5a1bf02624f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f 2024-12-09T17:23:11,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f, entries=150, sequenceid=14, 
filesize=11.7 K 2024-12-09T17:23:11,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/6eb81226d5e144d791e8ae5548685a75 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75 2024-12-09T17:23:11,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75, entries=150, sequenceid=14, filesize=11.7 K 2024-12-09T17:23:11,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for a1b1655d3c492fdd18da414b0bd9edbd in 892ms, sequenceid=14, compaction requested=false 2024-12-09T17:23:11,292 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-09T17:23:11,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:11,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-09T17:23:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
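
[Annotation] At this point the flush for sequenceid=14 has committed its three store files (families A, B and C) and logged "Finished flush ... in 892ms", while the master continues re-dispatching the pid=129 flush procedure to the region server. For reference, a flush of this table can also be requested explicitly through the Admin API; the sketch below is a minimal, assumed usage against the same table, not something executed by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; the region servers then
                // run the flush (as the FlushRegionCallable / MemStoreFlusher entries above show).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
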
2024-12-09T17:23:11,305 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-09T17:23:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:11,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:11,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:11,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:11,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/5de2ca174d4242bda0805d9dcd5a962a is 50, key is test_row_0/A:col10/1733764990408/Put/seqid=0 2024-12-09T17:23:11,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742260_1436 (size=12001) 2024-12-09T17:23:11,313 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/5de2ca174d4242bda0805d9dcd5a962a 2024-12-09T17:23:11,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9a41f8f1c655418a9ff938c5909e71c9 is 50, key is test_row_0/B:col10/1733764990408/Put/seqid=0 2024-12-09T17:23:11,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742261_1437 (size=12001) 2024-12-09T17:23:11,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:11,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:11,525 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:11,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765051527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765051529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765051530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765051530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765051531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765051632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765051632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765051635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765051635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765051635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,729 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9a41f8f1c655418a9ff938c5909e71c9 2024-12-09T17:23:11,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 is 50, key is test_row_0/C:col10/1733764990408/Put/seqid=0 2024-12-09T17:23:11,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742262_1438 (size=12001) 2024-12-09T17:23:11,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765051836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765051836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765051839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765051840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:11,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:11,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765051841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,137 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 2024-12-09T17:23:12,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765052140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765052140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765052141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765052144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765052145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/5de2ca174d4242bda0805d9dcd5a962a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a 2024-12-09T17:23:12,169 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a, entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:23:12,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9a41f8f1c655418a9ff938c5909e71c9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9 2024-12-09T17:23:12,172 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9, 
entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:23:12,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 2024-12-09T17:23:12,176 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5, entries=150, sequenceid=39, filesize=11.7 K 2024-12-09T17:23:12,176 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a1b1655d3c492fdd18da414b0bd9edbd in 871ms, sequenceid=39, compaction requested=false 2024-12-09T17:23:12,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:12,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:12,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-09T17:23:12,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-09T17:23:12,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-09T17:23:12,178 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7850 sec 2024-12-09T17:23:12,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.7890 sec 2024-12-09T17:23:12,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-09T17:23:12,494 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-09T17:23:12,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:12,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-09T17:23:12,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:12,496 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:12,496 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:12,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:12,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:12,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:12,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:12,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:12,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:12,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:12,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/955217a320ad43d08dda0dd78f322fc9 is 50, key is test_row_0/A:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:12,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742263_1439 (size=14341) 2024-12-09T17:23:12,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765052665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765052666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765052671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765052671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765052672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765052773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765052773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765052774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765052775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765052777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:12,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:12,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:12,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:12,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:12,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:12,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:12,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765052977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765052977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765052978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765052978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:12,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765052981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/955217a320ad43d08dda0dd78f322fc9 2024-12-09T17:23:13,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/117cf219c9134019a8f8be3d54249da5 is 50, key is test_row_0/B:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742264_1440 (size=12001) 2024-12-09T17:23:13,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:13,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:13,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,256 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
as already flushing 2024-12-09T17:23:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765053279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765053282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765053283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765053283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765053284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,333 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:23:13,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:13,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/117cf219c9134019a8f8be3d54249da5 2024-12-09T17:23:13,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e000c6dbec62473f89e3b0aacb2667e8 is 50, key is test_row_0/C:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:13,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742265_1441 (size=12001) 2024-12-09T17:23:13,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
as already flushing 2024-12-09T17:23:13,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:13,713 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765053783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765053785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765053787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765053788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:13,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765053789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,865 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:13,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:13,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:13,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:13,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e000c6dbec62473f89e3b0aacb2667e8 2024-12-09T17:23:13,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/955217a320ad43d08dda0dd78f322fc9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9 2024-12-09T17:23:13,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9, entries=200, sequenceid=51, filesize=14.0 K 2024-12-09T17:23:13,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/117cf219c9134019a8f8be3d54249da5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5 2024-12-09T17:23:13,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5, entries=150, sequenceid=51, 
filesize=11.7 K 2024-12-09T17:23:13,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e000c6dbec62473f89e3b0aacb2667e8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8 2024-12-09T17:23:13,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8, entries=150, sequenceid=51, filesize=11.7 K 2024-12-09T17:23:13,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a1b1655d3c492fdd18da414b0bd9edbd in 1235ms, sequenceid=51, compaction requested=true 2024-12-09T17:23:13,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:13,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 
a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:13,883 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:13,883 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,883 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:13,883 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=37.4 K 2024-12-09T17:23:13,883 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.2 K 2024-12-09T17:23:13,884 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d999d8cb1a584c40bfad61ca232e1141, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733764990396 2024-12-09T17:23:13,884 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bca55b135764110a69ea5a1bf02624f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733764990396 2024-12-09T17:23:13,884 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a41f8f1c655418a9ff938c5909e71c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764990406 2024-12-09T17:23:13,884 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5de2ca174d4242bda0805d9dcd5a962a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764990406 2024-12-09T17:23:13,884 DEBUG 
[RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 955217a320ad43d08dda0dd78f322fc9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:13,884 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 117cf219c9134019a8f8be3d54249da5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:13,889 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#370 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:13,889 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:13,890 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd is 50, key is test_row_0/B:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:13,890 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/1d6d09647cd64de081e014de10ae0d34 is 50, key is test_row_0/A:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:13,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742266_1442 (size=12104) 2024-12-09T17:23:13,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742267_1443 (size=12104) 2024-12-09T17:23:14,018 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-09T17:23:14,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:14,018 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:14,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:14,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/1fa2460493ad4facb8bd2a5355ee69ce is 50, key is test_row_0/A:col10/1733764992669/Put/seqid=0 2024-12-09T17:23:14,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742268_1444 (size=12001) 2024-12-09T17:23:14,310 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/1d6d09647cd64de081e014de10ae0d34 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1d6d09647cd64de081e014de10ae0d34 2024-12-09T17:23:14,310 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd 2024-12-09T17:23:14,313 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into f6ae036b7a3c4b28bf4aa254b8bbd2cd(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:14,313 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into 1d6d09647cd64de081e014de10ae0d34(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:14,313 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733764993883; duration=0sec 2024-12-09T17:23:14,313 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733764993882; duration=0sec 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:14,313 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:14,314 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:14,314 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:14,314 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:14,314 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.2 K 2024-12-09T17:23:14,315 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6eb81226d5e144d791e8ae5548685a75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733764990396 2024-12-09T17:23:14,315 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ce6e459fbbd4daf8f8debfc3e7d9cc5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733764990406 2024-12-09T17:23:14,315 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e000c6dbec62473f89e3b0aacb2667e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:14,320 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:14,320 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/80e05f3bcdee4696bbba63785aebcadb is 50, key is test_row_0/C:col10/1733764992646/Put/seqid=0 2024-12-09T17:23:14,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742269_1445 (size=12104) 2024-12-09T17:23:14,429 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/1fa2460493ad4facb8bd2a5355ee69ce 2024-12-09T17:23:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9929bb28fb5245cbbbd017360d07f8d8 is 50, key is test_row_0/B:col10/1733764992669/Put/seqid=0 2024-12-09T17:23:14,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742270_1446 (size=12001) 2024-12-09T17:23:14,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:14,727 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/80e05f3bcdee4696bbba63785aebcadb as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/80e05f3bcdee4696bbba63785aebcadb 2024-12-09T17:23:14,730 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 80e05f3bcdee4696bbba63785aebcadb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:14,730 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:14,730 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733764993883; duration=0sec 2024-12-09T17:23:14,730 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:14,730 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:14,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:14,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:14,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765054800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765054801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765054803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765054804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765054804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,837 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9929bb28fb5245cbbbd017360d07f8d8 2024-12-09T17:23:14,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/eefb8649e78f46feaf6c1c07be124bf9 is 50, key is test_row_0/C:col10/1733764992669/Put/seqid=0 2024-12-09T17:23:14,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742271_1447 (size=12001) 2024-12-09T17:23:14,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765054904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765054906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765054907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765054910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:14,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:14,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765054910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765055109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765055110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765055110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765055113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765055113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,244 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/eefb8649e78f46feaf6c1c07be124bf9 2024-12-09T17:23:15,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/1fa2460493ad4facb8bd2a5355ee69ce as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce 2024-12-09T17:23:15,249 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce, entries=150, sequenceid=75, filesize=11.7 K 2024-12-09T17:23:15,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/9929bb28fb5245cbbbd017360d07f8d8 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8 2024-12-09T17:23:15,254 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8, entries=150, sequenceid=75, filesize=11.7 K 2024-12-09T17:23:15,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/eefb8649e78f46feaf6c1c07be124bf9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9 2024-12-09T17:23:15,257 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9, entries=150, sequenceid=75, filesize=11.7 K 2024-12-09T17:23:15,258 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for a1b1655d3c492fdd18da414b0bd9edbd in 1240ms, sequenceid=75, compaction requested=false 2024-12-09T17:23:15,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:15,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:15,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-09T17:23:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-09T17:23:15,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-09T17:23:15,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7620 sec 2024-12-09T17:23:15,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.7650 sec 2024-12-09T17:23:15,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:15,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:15,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:15,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9b6e859b14b6497ca534d5e9fbacbc41 is 50, key is test_row_0/A:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:15,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742272_1448 (size=16681) 2024-12-09T17:23:15,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765055430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765055431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765055433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765055434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765055434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765055535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765055535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765055535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765055538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765055538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765055740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765055740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765055740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765055740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765055743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:15,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9b6e859b14b6497ca534d5e9fbacbc41 2024-12-09T17:23:15,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/657c49180c6a4e468851d8504bc62676 is 50, key is test_row_0/B:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:15,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742273_1449 (size=12001) 2024-12-09T17:23:16,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765056044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765056044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765056045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765056045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765056046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/657c49180c6a4e468851d8504bc62676 2024-12-09T17:23:16,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/8844fb6532324710a509d9f737865f85 is 50, key is test_row_0/C:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:16,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742274_1450 (size=12001) 2024-12-09T17:23:16,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765056548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765056548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765056548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765056550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:16,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765056552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-09T17:23:16,600 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-09T17:23:16,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:16,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-09T17:23:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:16,602 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:16,602 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:16,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:16,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at 
sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/8844fb6532324710a509d9f737865f85 2024-12-09T17:23:16,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9b6e859b14b6497ca534d5e9fbacbc41 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41 2024-12-09T17:23:16,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41, entries=250, sequenceid=93, filesize=16.3 K 2024-12-09T17:23:16,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/657c49180c6a4e468851d8504bc62676 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676 2024-12-09T17:23:16,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676, entries=150, sequenceid=93, filesize=11.7 K 2024-12-09T17:23:16,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/8844fb6532324710a509d9f737865f85 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85 2024-12-09T17:23:16,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85, entries=150, sequenceid=93, filesize=11.7 K 2024-12-09T17:23:16,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a1b1655d3c492fdd18da414b0bd9edbd in 1238ms, sequenceid=93, compaction requested=true 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:16,652 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 
{}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:16,652 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:16,653 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:16,653 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:16,653 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.3 K 2024-12-09T17:23:16,653 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1d6d09647cd64de081e014de10ae0d34, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=39.8 K 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d6d09647cd64de081e014de10ae0d34, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f6ae036b7a3c4b28bf4aa254b8bbd2cd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9929bb28fb5245cbbbd017360d07f8d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733764992664 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fa2460493ad4facb8bd2a5355ee69ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733764992664 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 657c49180c6a4e468851d8504bc62676, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994802 2024-12-09T17:23:16,653 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b6e859b14b6497ca534d5e9fbacbc41, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994796 2024-12-09T17:23:16,664 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#378 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:16,664 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:16,665 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/33a5c88ed64c44dcaad9db5857a3e9d2 is 50, key is test_row_0/A:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:16,665 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/5021e07632d14e5b84b0dbc1f7d5614a is 50, key is test_row_0/B:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:16,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742275_1451 (size=12207) 2024-12-09T17:23:16,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742276_1452 (size=12207) 2024-12-09T17:23:16,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:16,753 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:16,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:16,754 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:16,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/bbc03db6d14d493d9ab250b869f67bfd is 50, key is test_row_0/A:col10/1733764995430/Put/seqid=0 2024-12-09T17:23:16,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742277_1453 (size=12001) 2024-12-09T17:23:16,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:17,076 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/33a5c88ed64c44dcaad9db5857a3e9d2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/33a5c88ed64c44dcaad9db5857a3e9d2 2024-12-09T17:23:17,078 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into 33a5c88ed64c44dcaad9db5857a3e9d2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:17,079 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733764996652; duration=0sec 2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:17,079 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:17,079 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:17,080 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/80e05f3bcdee4696bbba63785aebcadb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.3 K 2024-12-09T17:23:17,080 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80e05f3bcdee4696bbba63785aebcadb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733764991529 2024-12-09T17:23:17,080 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting eefb8649e78f46feaf6c1c07be124bf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733764992664 2024-12-09T17:23:17,080 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8844fb6532324710a509d9f737865f85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994802 2024-12-09T17:23:17,082 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/5021e07632d14e5b84b0dbc1f7d5614a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5021e07632d14e5b84b0dbc1f7d5614a 2024-12-09T17:23:17,085 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#381 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:17,086 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/7ac79cf2d6344d78865942dd1df4d463 is 50, key is test_row_0/C:col10/1733764995414/Put/seqid=0 2024-12-09T17:23:17,086 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into 5021e07632d14e5b84b0dbc1f7d5614a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:17,086 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:17,086 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733764996652; duration=0sec 2024-12-09T17:23:17,086 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:17,086 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:17,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742278_1454 (size=12207) 2024-12-09T17:23:17,190 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/bbc03db6d14d493d9ab250b869f67bfd 2024-12-09T17:23:17,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/13aad20a0698444e973fd5c553a20ef2 is 50, key is test_row_0/B:col10/1733764995430/Put/seqid=0 2024-12-09T17:23:17,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742279_1455 (size=12001) 2024-12-09T17:23:17,204 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:17,501 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/7ac79cf2d6344d78865942dd1df4d463 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/7ac79cf2d6344d78865942dd1df4d463 2024-12-09T17:23:17,504 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 7ac79cf2d6344d78865942dd1df4d463(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:17,504 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:17,504 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733764996652; duration=0sec 2024-12-09T17:23:17,504 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:17,504 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:17,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:17,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:17,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765057567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765057568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765057568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765057572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765057573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,598 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/13aad20a0698444e973fd5c553a20ef2 2024-12-09T17:23:17,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a9973cba35514fdc8540c2f4393576b6 is 50, key is test_row_0/C:col10/1733764995430/Put/seqid=0 2024-12-09T17:23:17,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742280_1456 (size=12001) 2024-12-09T17:23:17,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765057674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765057675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765057675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765057675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765057677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765057877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765057878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765057879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765057879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:17,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:17,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765057880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,008 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a9973cba35514fdc8540c2f4393576b6 2024-12-09T17:23:18,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/bbc03db6d14d493d9ab250b869f67bfd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd 2024-12-09T17:23:18,013 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd, entries=150, sequenceid=114, filesize=11.7 K 2024-12-09T17:23:18,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/13aad20a0698444e973fd5c553a20ef2 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2 2024-12-09T17:23:18,016 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2, entries=150, sequenceid=114, filesize=11.7 K 2024-12-09T17:23:18,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a9973cba35514fdc8540c2f4393576b6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6 2024-12-09T17:23:18,018 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6, entries=150, sequenceid=114, filesize=11.7 K 2024-12-09T17:23:18,019 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a1b1655d3c492fdd18da414b0bd9edbd in 1265ms, sequenceid=114, compaction requested=false 2024-12-09T17:23:18,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:18,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:18,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-09T17:23:18,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-09T17:23:18,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-09T17:23:18,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4180 sec 2024-12-09T17:23:18,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.4200 sec 2024-12-09T17:23:18,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:18,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:18,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:18,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/087d0818d8ba4569b81ffb02cda83014 is 50, key is test_row_0/A:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:18,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742281_1457 (size=14541) 2024-12-09T17:23:18,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765058199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765058200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765058202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765058202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765058202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765058304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765058305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765058307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765058307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765058307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765058508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765058508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765058509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765058510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765058512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/087d0818d8ba4569b81ffb02cda83014 2024-12-09T17:23:18,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/004545bffafe4e8cb62e8375936fed95 is 50, key is test_row_0/B:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:18,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742282_1458 (size=12151) 2024-12-09T17:23:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-09T17:23:18,705 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-09T17:23:18,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:18,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-09T17:23:18,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
2024-12-09T17:23:18,707 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:18,708 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:18,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-09T17:23:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765058811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765058812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765058812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765058813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765058815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,859 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:18,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-09T17:23:18,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:18,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:18,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:18,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:18,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/004545bffafe4e8cb62e8375936fed95 2024-12-09T17:23:19,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/3c33f976a4444640b5d1894ef4cd29f8 is 50, key is test_row_0/C:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:19,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742283_1459 (size=12151) 2024-12-09T17:23:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-09T17:23:19,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-09T17:23:19,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:19,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-09T17:23:19,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:19,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-09T17:23:19,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-09T17:23:19,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:19,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:19,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:19,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765059315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765059316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765059317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765059317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765059318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/3c33f976a4444640b5d1894ef4cd29f8 2024-12-09T17:23:19,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/087d0818d8ba4569b81ffb02cda83014 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014 2024-12-09T17:23:19,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014, entries=200, sequenceid=134, filesize=14.2 K 2024-12-09T17:23:19,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/004545bffafe4e8cb62e8375936fed95 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95 2024-12-09T17:23:19,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95, entries=150, sequenceid=134, filesize=11.9 K 2024-12-09T17:23:19,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/3c33f976a4444640b5d1894ef4cd29f8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8 2024-12-09T17:23:19,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8, entries=150, sequenceid=134, filesize=11.9 K 2024-12-09T17:23:19,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a1b1655d3c492fdd18da414b0bd9edbd in 1236ms, sequenceid=134, compaction requested=true 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:19,420 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:19,420 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:19,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38749 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:19,421 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,421 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,421 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/33a5c88ed64c44dcaad9db5857a3e9d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=37.8 K 2024-12-09T17:23:19,421 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5021e07632d14e5b84b0dbc1f7d5614a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.5 K 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33a5c88ed64c44dcaad9db5857a3e9d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994802 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 5021e07632d14e5b84b0dbc1f7d5614a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994802 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 13aad20a0698444e973fd5c553a20ef2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733764995430 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting bbc03db6d14d493d9ab250b869f67bfd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733764995430 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 087d0818d8ba4569b81ffb02cda83014, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:19,421 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 004545bffafe4e8cb62e8375936fed95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:19,428 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:19,429 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/bc9abb1fa251464a9e288a924d2fe87c is 50, key is test_row_0/A:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:19,429 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:19,430 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/8c2fbdbc69924def85f175b37065410e is 50, key is test_row_0/B:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:19,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742284_1460 (size=12459) 2024-12-09T17:23:19,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742285_1461 (size=12459) 2024-12-09T17:23:19,435 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/bc9abb1fa251464a9e288a924d2fe87c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bc9abb1fa251464a9e288a924d2fe87c 2024-12-09T17:23:19,438 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into bc9abb1fa251464a9e288a924d2fe87c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:19,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:19,438 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733764999420; duration=0sec 2024-12-09T17:23:19,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:19,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:19,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:19,439 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:19,439 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:19,439 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,439 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/7ac79cf2d6344d78865942dd1df4d463, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.5 K 2024-12-09T17:23:19,440 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ac79cf2d6344d78865942dd1df4d463, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733764994802 2024-12-09T17:23:19,440 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9973cba35514fdc8540c2f4393576b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733764995430 2024-12-09T17:23:19,440 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c33f976a4444640b5d1894ef4cd29f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:19,445 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#389 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:19,446 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/2f02e40eeee7491d83df80bfa24f736f is 50, key is test_row_0/C:col10/1733764998183/Put/seqid=0 2024-12-09T17:23:19,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742286_1462 (size=12459) 2024-12-09T17:23:19,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:19,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:19,468 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:19,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/f365f92e925648d1a486223acd447117 is 50, key is test_row_0/A:col10/1733764998199/Put/seqid=0 2024-12-09T17:23:19,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to 
blk_1073742287_1463 (size=12151) 2024-12-09T17:23:19,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-09T17:23:19,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/8c2fbdbc69924def85f175b37065410e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/8c2fbdbc69924def85f175b37065410e 2024-12-09T17:23:19,840 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into 8c2fbdbc69924def85f175b37065410e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:19,840 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:19,840 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733764999420; duration=0sec 2024-12-09T17:23:19,840 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:19,840 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:19,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/2f02e40eeee7491d83df80bfa24f736f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2f02e40eeee7491d83df80bfa24f736f 2024-12-09T17:23:19,867 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 2f02e40eeee7491d83df80bfa24f736f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
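The three minor compactions above (stores A, B and C, three files each merged into one file of about 12.2 K) were selected automatically by the ExploringCompactionPolicy. For reference, the same work can also be requested explicitly through the Admin API; this is a minimal sketch, assuming a connection to this cluster is available, not something taken from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Queue a minor compaction for every region/store of the table;
                // majorCompact(...) would rewrite all store files instead.
                admin.compact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }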
2024-12-09T17:23:19,868 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:19,868 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733764999420; duration=0sec 2024-12-09T17:23:19,868 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:19,868 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:19,875 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/f365f92e925648d1a486223acd447117 2024-12-09T17:23:19,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/569e6b61f2d343288ca549f40974877d is 50, key is test_row_0/B:col10/1733764998199/Put/seqid=0 2024-12-09T17:23:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742288_1464 (size=12151) 2024-12-09T17:23:20,284 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/569e6b61f2d343288ca549f40974877d 2024-12-09T17:23:20,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/cfd89e8128a54382b9ba60ff917fa46c is 50, key is test_row_0/C:col10/1733764998199/Put/seqid=0 2024-12-09T17:23:20,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742289_1465 (size=12151) 2024-12-09T17:23:20,291 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/cfd89e8128a54382b9ba60ff917fa46c 2024-12-09T17:23:20,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/f365f92e925648d1a486223acd447117 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117 2024-12-09T17:23:20,297 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117, entries=150, sequenceid=155, filesize=11.9 K 2024-12-09T17:23:20,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/569e6b61f2d343288ca549f40974877d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d 2024-12-09T17:23:20,300 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d, entries=150, sequenceid=155, filesize=11.9 K 2024-12-09T17:23:20,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/cfd89e8128a54382b9ba60ff917fa46c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c 2024-12-09T17:23:20,303 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c, entries=150, sequenceid=155, filesize=11.9 K 2024-12-09T17:23:20,304 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for a1b1655d3c492fdd18da414b0bd9edbd in 836ms, sequenceid=155, compaction requested=false 2024-12-09T17:23:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
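The flush being committed above (pid=135, the region subprocedure of FlushTableProcedure pid=134) is the server-side half of a client flush request; the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed" line later in the log is the client future for that same procedure, which is what an Admin flush call produces. A minimal sketch of such a call, under the assumption that the cluster configuration is on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Blocks until the master's FlushTableProcedure finishes; the master
                // fans out one FlushRegionProcedure per region of the table.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }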
2024-12-09T17:23:20,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-09T17:23:20,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-09T17:23:20,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-09T17:23:20,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5970 sec 2024-12-09T17:23:20,306 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6000 sec 2024-12-09T17:23:20,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:20,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-09T17:23:20,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:20,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/573550c320ed474dbb86dbab848569d4 is 50, key is test_row_0/A:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:20,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742290_1466 (size=16931) 2024-12-09T17:23:20,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/573550c320ed474dbb86dbab848569d4 2024-12-09T17:23:20,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/f8517ca746ec4bc5a8760b529a725ca8 is 50, key is test_row_0/B:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:20,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to 
blk_1073742291_1467 (size=12151) 2024-12-09T17:23:20,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765060344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765060348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765060349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765060349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765060351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765060452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765060454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765060454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765060454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765060454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765060656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765060658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765060658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765060658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765060658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/f8517ca746ec4bc5a8760b529a725ca8 2024-12-09T17:23:20,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a8c3746177994393a07391a4099bd1bc is 50, key is test_row_0/C:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742292_1468 (size=12151) 2024-12-09T17:23:20,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-09T17:23:20,810 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-09T17:23:20,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-09T17:23:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:20,813 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:20,813 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:20,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
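The RegionTooBusyException bursts above ("Over memstore limit=512.0 K") are raised by HRegion.checkResources when a region's memstore exceeds its blocking size, which is the configured flush size multiplied by the block multiplier. The 512 K figure shows this test runs with a deliberately tiny flush size (it would match, for example, a 128 K flush size with the default multiplier of 4; the production default flush size is 128 MB). A minimal sketch of the two properties involved, with illustrative values that are not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Per-region memstore size that triggers a flush (128 K here for illustration).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            // Writes are rejected with RegionTooBusyException once the memstore reaches
            // flush.size * block.multiplier (128 K * 4 = 512 K with these values).
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }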
2024-12-09T17:23:20,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:20,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765060958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,964 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765060961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,964 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-09T17:23:20,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:20,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765060961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:20,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:20,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765060963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:20,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765060963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:20,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
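[Editor's note] The repeated WARN entries and stack traces above show puts being rejected with org.apache.hadoop.hbase.RegionTooBusyException once the region's memstore passes its blocking limit; the caller is expected to back off and retry. Below is a minimal client-side sketch of that handling against the TestAcidGuarantees table seen in the log. It assumes a plain HBase Java client; the row/column names, retry bound, and backoff are illustrative, and it assumes client-side retries are configured low enough that the exception actually surfaces to the caller rather than being retried internally and rethrown as a RetriesExhaustedException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Back off and retry while the region is blocking writes on memstore pressure.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a few tries (illustrative bound)
          Thread.sleep(100L * attempt);   // simple linear backoff before retrying
        }
      }
    }
  }
}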
2024-12-09T17:23:21,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:21,116 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-09T17:23:21,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:21,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:21,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:21,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:21,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:21,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
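[Editor's note] The "Over memstore limit=512.0 K" figure in the exceptions above is the blocking threshold checked in HRegion.checkResources: roughly the per-region flush size multiplied by the memstore block multiplier. A 512 KB limit suggests this test runs with a deliberately tiny flush size; the exact values are not visible in this excerpt, so the numbers below are illustrative only (the configuration key names, however, are the standard HBase ones).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: a 128 KB flush size with a multiplier of 4 yields the
    // 512 KB blocking limit reported in the log; the test's real settings may differ.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // per-region flush trigger (bytes)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at multiplier x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above ~" + blockingLimit + " bytes per region"); // 524288 = 512 K
  }
}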
2024-12-09T17:23:21,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a8c3746177994393a07391a4099bd1bc 2024-12-09T17:23:21,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/573550c320ed474dbb86dbab848569d4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4 2024-12-09T17:23:21,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4, entries=250, sequenceid=169, filesize=16.5 K 2024-12-09T17:23:21,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/f8517ca746ec4bc5a8760b529a725ca8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8 2024-12-09T17:23:21,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8, entries=150, sequenceid=169, filesize=11.9 K 2024-12-09T17:23:21,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/a8c3746177994393a07391a4099bd1bc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc 2024-12-09T17:23:21,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc, entries=150, sequenceid=169, filesize=11.9 K 2024-12-09T17:23:21,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a1b1655d3c492fdd18da414b0bd9edbd in 851ms, sequenceid=169, compaction requested=true 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:21,180 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:21,180 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:21,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:21,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:21,181 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:21,181 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
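[Editor's note] The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines above reflect the per-store file-count thresholds: a minor compaction becomes eligible once the minimum file count is reached, and flushes stall when a store accumulates the blocking count (16 here matches the common hbase.hstore.blockingStoreFiles default). The sketch below shows those knobs plus an explicit compaction request through the Admin API; the configuration keys are standard, the values are illustrative, and the explicit request is an alternative to the system-requested compaction seen in the log rather than what the test itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionKnobsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values; "3 eligible, 16 blocking" in the log is consistent with a
    // minimum of 3 selectable files and a blockingStoreFiles setting of 16.
    conf.setInt("hbase.hstore.compaction.min", 3);        // files needed before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // max files folded into one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // flushes stall above this per-store count
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of the table explicitly instead of waiting for the
      // MemStoreFlusher-triggered request shown in the log.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}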
2024-12-09T17:23:21,181 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/8c2fbdbc69924def85f175b37065410e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.9 K 2024-12-09T17:23:21,181 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bc9abb1fa251464a9e288a924d2fe87c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=40.6 K 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c2fbdbc69924def85f175b37065410e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:21,181 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc9abb1fa251464a9e288a924d2fe87c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:21,182 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 569e6b61f2d343288ca549f40974877d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764998199 2024-12-09T17:23:21,182 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f365f92e925648d1a486223acd447117, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764998199 2024-12-09T17:23:21,182 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting f8517ca746ec4bc5a8760b529a725ca8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000327 2024-12-09T17:23:21,182 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 573550c320ed474dbb86dbab848569d4, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000323 2024-12-09T17:23:21,187 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#396 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:21,187 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:21,188 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/4939c9b7295d4341b3d211e03d7cbf10 is 50, key is test_row_0/B:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:21,188 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/0afd9d7681c74b4a8f323e0f236a5097 is 50, key is test_row_0/A:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:21,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742294_1470 (size=12561) 2024-12-09T17:23:21,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742293_1469 (size=12561) 2024-12-09T17:23:21,269 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:21,269 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:21,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:21,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/32a22ce842824483b7b59ec48b0c8ad8 is 50, key is test_row_0/A:col10/1733765000348/Put/seqid=0 2024-12-09T17:23:21,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742295_1471 (size=12151) 2024-12-09T17:23:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:21,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:21,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765061477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765061479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765061480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765061480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765061481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765061583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765061583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765061584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765061585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,594 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/4939c9b7295d4341b3d211e03d7cbf10 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/4939c9b7295d4341b3d211e03d7cbf10 2024-12-09T17:23:21,597 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into 4939c9b7295d4341b3d211e03d7cbf10(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:21,597 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:21,597 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733765001180; duration=0sec 2024-12-09T17:23:21,598 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:21,598 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:21,598 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:21,598 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:21,598 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:21,599 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:21,599 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2f02e40eeee7491d83df80bfa24f736f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=35.9 K 2024-12-09T17:23:21,599 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f02e40eeee7491d83df80bfa24f736f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733764997567 2024-12-09T17:23:21,600 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting cfd89e8128a54382b9ba60ff917fa46c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733764998199 2024-12-09T17:23:21,600 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a8c3746177994393a07391a4099bd1bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000327 2024-12-09T17:23:21,601 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/0afd9d7681c74b4a8f323e0f236a5097 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/0afd9d7681c74b4a8f323e0f236a5097 2024-12-09T17:23:21,605 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into 0afd9d7681c74b4a8f323e0f236a5097(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:21,605 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:21,605 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733765001180; duration=0sec 2024-12-09T17:23:21,605 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:21,605 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:21,605 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#399 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:21,606 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/336d18bf9a8e45b3978cb9401fb8920a is 50, key is test_row_0/C:col10/1733765000329/Put/seqid=0 2024-12-09T17:23:21,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742296_1472 (size=12561) 2024-12-09T17:23:21,676 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/32a22ce842824483b7b59ec48b0c8ad8 2024-12-09T17:23:21,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/555869ff0d7b488b8d2c544039578c8b is 50, key is test_row_0/B:col10/1733765000348/Put/seqid=0 2024-12-09T17:23:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742297_1473 (size=12151) 2024-12-09T17:23:21,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765061789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765061790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765061790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765061793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:21,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:22,020 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/336d18bf9a8e45b3978cb9401fb8920a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/336d18bf9a8e45b3978cb9401fb8920a 2024-12-09T17:23:22,023 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 336d18bf9a8e45b3978cb9401fb8920a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:22,023 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:22,024 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733765001180; duration=0sec 2024-12-09T17:23:22,024 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:22,024 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:22,085 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/555869ff0d7b488b8d2c544039578c8b 2024-12-09T17:23:22,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e231eef083e747fface2285bc058e5bc is 50, key is test_row_0/C:col10/1733765000348/Put/seqid=0 2024-12-09T17:23:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765062091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765062091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765062094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765062096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742298_1474 (size=12151) 2024-12-09T17:23:22,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765062490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,504 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e231eef083e747fface2285bc058e5bc 2024-12-09T17:23:22,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/32a22ce842824483b7b59ec48b0c8ad8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8 2024-12-09T17:23:22,510 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:23:22,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/555869ff0d7b488b8d2c544039578c8b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b 2024-12-09T17:23:22,513 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:23:22,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/e231eef083e747fface2285bc058e5bc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc 2024-12-09T17:23:22,516 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc, entries=150, sequenceid=193, filesize=11.9 K 2024-12-09T17:23:22,516 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for a1b1655d3c492fdd18da414b0bd9edbd in 1247ms, sequenceid=193, compaction requested=false 2024-12-09T17:23:22,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:22,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:22,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-09T17:23:22,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-09T17:23:22,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-09T17:23:22,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7040 sec 2024-12-09T17:23:22,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7070 sec 2024-12-09T17:23:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:22,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:23:22,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:22,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:22,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:22,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:22,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:22,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:22,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9d419b0a91164ec9b4a901c306eb0cf8 is 50, key is test_row_0/A:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:22,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742299_1475 (size=12151) 2024-12-09T17:23:22,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765062621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765062622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765062622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765062625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765062726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765062726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765062726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765062730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-09T17:23:22,916 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-09T17:23:22,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-09T17:23:22,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:22,918 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:22,918 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:22,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:22,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765062929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765062930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765062930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:22,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:22,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765062934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9d419b0a91164ec9b4a901c306eb0cf8 2024-12-09T17:23:23,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7e4361af905d4e2f837d59728013a3a9 is 50, key is test_row_0/B:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:23,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742300_1476 (size=12151) 2024-12-09T17:23:23,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:23,069 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:23,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:23,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:23,222 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:23,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765063233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765063234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765063235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765063237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,374 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:23,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
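The Mutate calls above are being rejected with RegionTooBusyException while the region's memstore is over its blocking limit. Below is a minimal client-side sketch in Java of a writer that backs off and retries such puts; the table name, row key test_row_0, family A and qualifier col10 are taken from the log, while the class name, value, and retry/backoff figures are illustrative assumptions, and depending on client retry settings the exception may surface wrapped in a RetriesExhaustedException rather than directly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                    // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                      // may fail while the memstore is over its blocking limit
          return;
        } catch (RegionTooBusyException e) {   // the "Over memstore limit=..." rejection seen in the log
          Thread.sleep(backoffMs);             // give the in-progress flush a chance to drain the memstore
          backoffMs *= 2;                      // exponential backoff between attempts
        }
      }
      throw new RuntimeException("region stayed too busy after retries");
    }
  }
}
```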
2024-12-09T17:23:23,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7e4361af905d4e2f837d59728013a3a9 2024-12-09T17:23:23,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a is 50, key is test_row_0/C:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:23,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742301_1477 (size=12151) 2024-12-09T17:23:23,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:23,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:23,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765063735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765063739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765063740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:23,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765063742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a 2024-12-09T17:23:23,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9d419b0a91164ec9b4a901c306eb0cf8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8 2024-12-09T17:23:23,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8, entries=150, sequenceid=209, filesize=11.9 K 2024-12-09T17:23:23,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7e4361af905d4e2f837d59728013a3a9 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9 2024-12-09T17:23:23,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
as already flushing 2024-12-09T17:23:23,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9, entries=150, sequenceid=209, filesize=11.9 K 2024-12-09T17:23:23,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:23,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
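The repeated "Over memstore limit=512.0 K" warnings mean the region has reached its blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal configuration sketch in Java showing one combination that yields a 512 K blocking limit; the 128 KB flush size and multiplier of 4 are assumptions for illustration, not necessarily the values this test run uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (illustrative; the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes (RegionTooBusyException) once the memstore exceeds
    // flush.size * block.multiplier -- 4 * 128 KB = 512 KB, matching the limit reported in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blocking + " bytes");
  }
}
```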
2024-12-09T17:23:23,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
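pid=139 is a master-driven flush procedure that the region server keeps rejecting with "NOT flushing ... as already flushing" until the in-progress memstore flush finishes. A minimal sketch in Java of requesting the same kind of flush from a client through the Admin API, assuming a reachable cluster and the TestAcidGuarantees table from the log; the class name is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; internally this is driven by
      // a flush procedure of the same kind as pid=139 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```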
2024-12-09T17:23:23,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a 2024-12-09T17:23:23,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a, entries=150, sequenceid=209, filesize=11.9 K 2024-12-09T17:23:23,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a1b1655d3c492fdd18da414b0bd9edbd in 1237ms, sequenceid=209, compaction requested=true 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:23,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 
a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:23,836 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:23,836 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,837 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:23,837 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/4939c9b7295d4341b3d211e03d7cbf10, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.0 K 2024-12-09T17:23:23,837 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/0afd9d7681c74b4a8f323e0f236a5097, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.0 K 2024-12-09T17:23:23,837 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4939c9b7295d4341b3d211e03d7cbf10, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000327 2024-12-09T17:23:23,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 0afd9d7681c74b4a8f323e0f236a5097, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000327 2024-12-09T17:23:23,837 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 555869ff0d7b488b8d2c544039578c8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733765000347 2024-12-09T17:23:23,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 32a22ce842824483b7b59ec48b0c8ad8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733765000347 2024-12-09T17:23:23,837 DEBUG 
[RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e4361af905d4e2f837d59728013a3a9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:23,837 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d419b0a91164ec9b4a901c306eb0cf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:23,841 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#406 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:23,841 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#405 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:23,842 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/451ec08f70fa4f549b3c4371c714fe83 is 50, key is test_row_0/A:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:23,842 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/536c2154033b4df9a403adb7471973b6 is 50, key is test_row_0/B:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742302_1478 (size=12663) 2024-12-09T17:23:23,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742303_1479 (size=12663) 2024-12-09T17:23:23,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:23,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
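The selection records above show ExploringCompactionPolicy picking three ~12 K store files per store as soon as they become eligible. A minimal sketch in Java of the configuration keys that govern that selection; the values shown are the common defaults and are assumptions here, not necessarily what this test cluster runs with.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered
    // (the selections above fire as soon as 3 files accumulate per store).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may pick up.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used when scoring candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("min files per minor compaction = "
        + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}
```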
2024-12-09T17:23:23,984 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:23,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9ead835300c547d5bb5640376043fbd7 is 50, key is test_row_0/A:col10/1733765002625/Put/seqid=0 2024-12-09T17:23:23,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742304_1480 (size=12151) 2024-12-09T17:23:24,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:24,249 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/451ec08f70fa4f549b3c4371c714fe83 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/451ec08f70fa4f549b3c4371c714fe83 2024-12-09T17:23:24,249 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/536c2154033b4df9a403adb7471973b6 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/536c2154033b4df9a403adb7471973b6 2024-12-09T17:23:24,252 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into 
536c2154033b4df9a403adb7471973b6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:24,252 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into 451ec08f70fa4f549b3c4371c714fe83(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:24,252 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733765003836; duration=0sec 2024-12-09T17:23:24,252 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733765003836; duration=0sec 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:24,252 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:24,253 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:24,253 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:24,253 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:24,253 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:24,253 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/336d18bf9a8e45b3978cb9401fb8920a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.0 K 2024-12-09T17:23:24,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 336d18bf9a8e45b3978cb9401fb8920a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733765000327 2024-12-09T17:23:24,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e231eef083e747fface2285bc058e5bc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733765000347 2024-12-09T17:23:24,254 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 633d9d9cbe7e49bd9fa8269dfb1fbb1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:24,259 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#408 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:24,260 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/deca93766062472db653ddcad28d732e is 50, key is test_row_0/C:col10/1733765001480/Put/seqid=0 2024-12-09T17:23:24,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742305_1481 (size=12663) 2024-12-09T17:23:24,265 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/deca93766062472db653ddcad28d732e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/deca93766062472db653ddcad28d732e 2024-12-09T17:23:24,269 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into deca93766062472db653ddcad28d732e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:24,269 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:24,270 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733765003836; duration=0sec 2024-12-09T17:23:24,270 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:24,270 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:24,390 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9ead835300c547d5bb5640376043fbd7 2024-12-09T17:23:24,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/67fa906ee98b4c0c965cbe1b43c8f0cc is 50, key is test_row_0/B:col10/1733765002625/Put/seqid=0 2024-12-09T17:23:24,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742306_1482 (size=12151) 2024-12-09T17:23:24,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:24,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:24,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765064543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765064648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765064739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765064746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765064747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765064749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:24,798 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/67fa906ee98b4c0c965cbe1b43c8f0cc 2024-12-09T17:23:24,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/2eef813344224b63ada69d34342f6c06 is 50, key is test_row_0/C:col10/1733765002625/Put/seqid=0 2024-12-09T17:23:24,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742307_1483 (size=12151) 2024-12-09T17:23:24,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:24,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765064852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:25,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:25,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765065158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:25,206 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/2eef813344224b63ada69d34342f6c06 2024-12-09T17:23:25,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/9ead835300c547d5bb5640376043fbd7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7 2024-12-09T17:23:25,211 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:23:25,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/67fa906ee98b4c0c965cbe1b43c8f0cc as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc 2024-12-09T17:23:25,214 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:23:25,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/2eef813344224b63ada69d34342f6c06 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06 2024-12-09T17:23:25,217 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06, entries=150, sequenceid=232, filesize=11.9 K 2024-12-09T17:23:25,218 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a1b1655d3c492fdd18da414b0bd9edbd in 1234ms, sequenceid=232, compaction requested=false 2024-12-09T17:23:25,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:25,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:25,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-09T17:23:25,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-09T17:23:25,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-09T17:23:25,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3010 sec 2024-12-09T17:23:25,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.3030 sec 2024-12-09T17:23:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:25,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:25,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:25,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/e3c7c1b89fef43c7886d8fbc6eb65f3b is 50, key is test_row_0/A:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742308_1484 (size=14541) 2024-12-09T17:23:25,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:25,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765065750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:25,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765065856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765066060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/e3c7c1b89fef43c7886d8fbc6eb65f3b 2024-12-09T17:23:26,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/e1e4cc59512445969ff2b379d8c0f5ea is 50, key is test_row_0/B:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:26,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742309_1485 (size=12151) 2024-12-09T17:23:26,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765066362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/e1e4cc59512445969ff2b379d8c0f5ea 2024-12-09T17:23:26,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/ca8842180b484dd981be357d580def91 is 50, key is test_row_0/C:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:26,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742310_1486 (size=12151) 2024-12-09T17:23:26,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57844 deadline: 1733765066754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,758 DEBUG [Thread-1944 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:26,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57846 deadline: 1733765066761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,766 DEBUG [Thread-1940 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:26,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57850 deadline: 1733765066764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,767 DEBUG [Thread-1936 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:26,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57874 deadline: 1733765066768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,773 DEBUG [Thread-1938 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., hostname=80c69eb3c456,42927,1733764865379, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:26,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765066870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:26,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/ca8842180b484dd981be357d580def91 2024-12-09T17:23:26,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/e3c7c1b89fef43c7886d8fbc6eb65f3b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b 2024-12-09T17:23:26,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b, entries=200, sequenceid=249, filesize=14.2 K 2024-12-09T17:23:26,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/e1e4cc59512445969ff2b379d8c0f5ea as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea 2024-12-09T17:23:26,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea, entries=150, sequenceid=249, filesize=11.9 K 2024-12-09T17:23:26,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/ca8842180b484dd981be357d580def91 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91 2024-12-09T17:23:26,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91, entries=150, sequenceid=249, filesize=11.9 K 2024-12-09T17:23:26,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for a1b1655d3c492fdd18da414b0bd9edbd in 1234ms, sequenceid=249, compaction requested=true 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:26,901 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:26,901 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:26,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:26,902 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:26,902 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/536c2154033b4df9a403adb7471973b6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.1 K 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:26,902 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 536c2154033b4df9a403adb7471973b6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:26,902 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/451ec08f70fa4f549b3c4371c714fe83, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=38.4 K 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 67fa906ee98b4c0c965cbe1b43c8f0cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733765002619 2024-12-09T17:23:26,902 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 451ec08f70fa4f549b3c4371c714fe83, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:26,903 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e1e4cc59512445969ff2b379d8c0f5ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004539 2024-12-09T17:23:26,903 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ead835300c547d5bb5640376043fbd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733765002619 2024-12-09T17:23:26,903 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3c7c1b89fef43c7886d8fbc6eb65f3b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004526 2024-12-09T17:23:26,909 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:26,909 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:26,910 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/cc407b35fa61453ebd130e74960175cf is 50, key is test_row_0/A:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:26,910 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/b4ed99077d414fc8997e9d62fa18976b is 50, key is test_row_0/B:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742312_1488 (size=12765) 2024-12-09T17:23:26,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742311_1487 (size=12765) 2024-12-09T17:23:27,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-09T17:23:27,022 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-09T17:23:27,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-09T17:23:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:27,024 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:27,025 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:27,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:27,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:27,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:27,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-09T17:23:27,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:27,176 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:27,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:27,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/008abfed02d2462f95a047c66efaf762 is 50, key is test_row_0/A:col10/1733765005734/Put/seqid=0 2024-12-09T17:23:27,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742313_1489 (size=12301) 2024-12-09T17:23:27,324 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/cc407b35fa61453ebd130e74960175cf as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/cc407b35fa61453ebd130e74960175cf 2024-12-09T17:23:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:27,327 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/b4ed99077d414fc8997e9d62fa18976b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/b4ed99077d414fc8997e9d62fa18976b 2024-12-09T17:23:27,327 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into cc407b35fa61453ebd130e74960175cf(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:27,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:27,327 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733765006901; duration=0sec 2024-12-09T17:23:27,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:27,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:27,327 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:27,328 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:27,328 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:27,328 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:27,328 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/deca93766062472db653ddcad28d732e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.1 K 2024-12-09T17:23:27,328 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting deca93766062472db653ddcad28d732e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733765001476 2024-12-09T17:23:27,329 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2eef813344224b63ada69d34342f6c06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733765002619 2024-12-09T17:23:27,329 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca8842180b484dd981be357d580def91, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004539 2024-12-09T17:23:27,330 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into b4ed99077d414fc8997e9d62fa18976b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:27,330 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:27,330 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733765006901; duration=0sec 2024-12-09T17:23:27,330 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:27,330 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:27,333 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:27,334 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/9fb03b388d25433e94d88a0a99031bef is 50, key is test_row_0/C:col10/1733765004539/Put/seqid=0 2024-12-09T17:23:27,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742314_1490 (size=12765) 2024-12-09T17:23:27,621 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/008abfed02d2462f95a047c66efaf762 2024-12-09T17:23:27,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:27,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/fc5dc2c0de2c4144a3bf1b38badbc4ee is 50, key is test_row_0/B:col10/1733765005734/Put/seqid=0 2024-12-09T17:23:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742315_1491 (size=12301) 2024-12-09T17:23:27,740 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/9fb03b388d25433e94d88a0a99031bef as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/9fb03b388d25433e94d88a0a99031bef 2024-12-09T17:23:27,743 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 9fb03b388d25433e94d88a0a99031bef(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:27,743 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:27,743 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733765006901; duration=0sec 2024-12-09T17:23:27,743 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:27,743 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:27,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:27,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765067920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:28,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765068026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,030 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/fc5dc2c0de2c4144a3bf1b38badbc4ee 2024-12-09T17:23:28,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/125e8802209244edbaf2e5c1d39e555b is 50, key is test_row_0/C:col10/1733765005734/Put/seqid=0 2024-12-09T17:23:28,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742316_1492 (size=12301) 2024-12-09T17:23:28,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:28,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765068230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,438 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/125e8802209244edbaf2e5c1d39e555b 2024-12-09T17:23:28,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/008abfed02d2462f95a047c66efaf762 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762 2024-12-09T17:23:28,443 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762, entries=150, sequenceid=271, filesize=12.0 K 2024-12-09T17:23:28,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/fc5dc2c0de2c4144a3bf1b38badbc4ee as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee 2024-12-09T17:23:28,446 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee, entries=150, sequenceid=271, filesize=12.0 K 2024-12-09T17:23:28,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/125e8802209244edbaf2e5c1d39e555b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b 2024-12-09T17:23:28,449 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b, entries=150, sequenceid=271, filesize=12.0 K 2024-12-09T17:23:28,449 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a1b1655d3c492fdd18da414b0bd9edbd in 1273ms, sequenceid=271, compaction requested=false 2024-12-09T17:23:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:28,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-09T17:23:28,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-09T17:23:28,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-09T17:23:28,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4250 sec 2024-12-09T17:23:28,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4280 sec 2024-12-09T17:23:28,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:28,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:28,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:28,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/dfedb235d95147e5baaf50875543797e is 50, key is test_row_0/A:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:28,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742317_1493 (size=14741) 2024-12-09T17:23:28,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:28,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765068617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:28,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765068722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:28,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765068928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:28,947 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/dfedb235d95147e5baaf50875543797e 2024-12-09T17:23:28,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/6d6275c096d84b3dbe71aded995ba283 is 50, key is test_row_0/B:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:28,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742318_1494 (size=12301) 2024-12-09T17:23:29,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-09T17:23:29,127 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-09T17:23:29,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:29,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-09T17:23:29,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:29,129 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:29,129 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=142, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:29,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:29,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:29,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:29,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765069230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-09T17:23:29,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
as already flushing 2024-12-09T17:23:29,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/6d6275c096d84b3dbe71aded995ba283 2024-12-09T17:23:29,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/27578290858f4975a7dfe91fcf82ce8c is 50, key is test_row_0/C:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:29,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742319_1495 (size=12301) 2024-12-09T17:23:29,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:29,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-09T17:23:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:29,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-09T17:23:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
as already flushing 2024-12-09T17:23:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:29,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-09T17:23:29,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. as already flushing 2024-12-09T17:23:29,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:29,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:29,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57890 deadline: 1733765069736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/27578290858f4975a7dfe91fcf82ce8c 2024-12-09T17:23:29,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/dfedb235d95147e5baaf50875543797e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e 2024-12-09T17:23:29,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e, entries=200, sequenceid=289, filesize=14.4 K 2024-12-09T17:23:29,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/6d6275c096d84b3dbe71aded995ba283 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283 2024-12-09T17:23:29,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283, entries=150, sequenceid=289, filesize=12.0 K 2024-12-09T17:23:29,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/27578290858f4975a7dfe91fcf82ce8c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c 2024-12-09T17:23:29,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c, entries=150, sequenceid=289, filesize=12.0 K 2024-12-09T17:23:29,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a1b1655d3c492fdd18da414b0bd9edbd in 1237ms, sequenceid=289, compaction requested=true 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:29,776 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:29,776 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1b1655d3c492fdd18da414b0bd9edbd:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:29,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39807 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/B is initiating minor compaction (all files) 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/A is initiating minor compaction (all files) 2024-12-09T17:23:29,777 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/A in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 
2024-12-09T17:23:29,777 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/B in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,777 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/cc407b35fa61453ebd130e74960175cf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=38.9 K 2024-12-09T17:23:29,777 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/b4ed99077d414fc8997e9d62fa18976b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.5 K 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc407b35fa61453ebd130e74960175cf, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004539 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b4ed99077d414fc8997e9d62fa18976b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004539 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 008abfed02d2462f95a047c66efaf762, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733765005734 2024-12-09T17:23:29,777 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting fc5dc2c0de2c4144a3bf1b38badbc4ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733765005734 2024-12-09T17:23:29,778 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfedb235d95147e5baaf50875543797e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733765007907 2024-12-09T17:23:29,778 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d6275c096d84b3dbe71aded995ba283, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733765007916 
2024-12-09T17:23:29,792 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#B#compaction#424 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:29,792 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#A#compaction#423 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:29,792 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/a0b63150d0bc4abdb2fba5094665de1c is 50, key is test_row_0/A:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:29,792 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/eb86879db05d4d3eae4fb40dee5ddd52 is 50, key is test_row_0/B:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:29,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742320_1496 (size=13017) 2024-12-09T17:23:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742321_1497 (size=13017) 2024-12-09T17:23:29,809 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/a0b63150d0bc4abdb2fba5094665de1c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/a0b63150d0bc4abdb2fba5094665de1c 2024-12-09T17:23:29,812 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/A of a1b1655d3c492fdd18da414b0bd9edbd into a0b63150d0bc4abdb2fba5094665de1c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:29,812 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:29,812 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/A, priority=13, startTime=1733765009776; duration=0sec 2024-12-09T17:23:29,812 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:29,812 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:A 2024-12-09T17:23:29,812 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:29,813 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:29,813 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): a1b1655d3c492fdd18da414b0bd9edbd/C is initiating minor compaction (all files) 2024-12-09T17:23:29,813 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a1b1655d3c492fdd18da414b0bd9edbd/C in TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,813 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/9fb03b388d25433e94d88a0a99031bef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp, totalSize=36.5 K 2024-12-09T17:23:29,813 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fb03b388d25433e94d88a0a99031bef, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733765004539 2024-12-09T17:23:29,813 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 125e8802209244edbaf2e5c1d39e555b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733765005734 2024-12-09T17:23:29,813 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27578290858f4975a7dfe91fcf82ce8c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733765007916 2024-12-09T17:23:29,818 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a1b1655d3c492fdd18da414b0bd9edbd#C#compaction#425 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:29,818 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/66681efd332849bc9cd6dd1279cc71f0 is 50, key is test_row_0/C:col10/1733765007916/Put/seqid=0 2024-12-09T17:23:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742322_1498 (size=13017) 2024-12-09T17:23:29,889 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:29,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:29,890 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:29,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/3b55bc3121b141649d051a08ee23d98f is 50, key is test_row_0/A:col10/1733765008616/Put/seqid=0 2024-12-09T17:23:29,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to 
blk_1073742323_1499 (size=12301) 2024-12-09T17:23:29,902 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/3b55bc3121b141649d051a08ee23d98f 2024-12-09T17:23:29,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7fa48ade8ab7443996a58bf5b9de6cb2 is 50, key is test_row_0/B:col10/1733765008616/Put/seqid=0 2024-12-09T17:23:29,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742324_1500 (size=12301) 2024-12-09T17:23:29,914 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7fa48ade8ab7443996a58bf5b9de6cb2 2024-12-09T17:23:29,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/649dcdb190294c5dba0dd88cc7ed1f08 is 50, key is test_row_0/C:col10/1733765008616/Put/seqid=0 2024-12-09T17:23:29,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742325_1501 (size=12301) 2024-12-09T17:23:30,199 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/eb86879db05d4d3eae4fb40dee5ddd52 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/eb86879db05d4d3eae4fb40dee5ddd52 2024-12-09T17:23:30,202 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/B of a1b1655d3c492fdd18da414b0bd9edbd into eb86879db05d4d3eae4fb40dee5ddd52(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:30,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:30,202 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/B, priority=13, startTime=1733765009776; duration=0sec 2024-12-09T17:23:30,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:30,202 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:B 2024-12-09T17:23:30,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:30,241 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/66681efd332849bc9cd6dd1279cc71f0 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/66681efd332849bc9cd6dd1279cc71f0 2024-12-09T17:23:30,244 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a1b1655d3c492fdd18da414b0bd9edbd/C of a1b1655d3c492fdd18da414b0bd9edbd into 66681efd332849bc9cd6dd1279cc71f0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:30,244 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:30,244 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd., storeName=a1b1655d3c492fdd18da414b0bd9edbd/C, priority=13, startTime=1733765009776; duration=0sec 2024-12-09T17:23:30,245 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:30,245 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1b1655d3c492fdd18da414b0bd9edbd:C 2024-12-09T17:23:30,323 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/649dcdb190294c5dba0dd88cc7ed1f08 2024-12-09T17:23:30,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/3b55bc3121b141649d051a08ee23d98f as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/3b55bc3121b141649d051a08ee23d98f 2024-12-09T17:23:30,329 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/3b55bc3121b141649d051a08ee23d98f, entries=150, sequenceid=311, filesize=12.0 K 2024-12-09T17:23:30,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/7fa48ade8ab7443996a58bf5b9de6cb2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7fa48ade8ab7443996a58bf5b9de6cb2 2024-12-09T17:23:30,347 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7fa48ade8ab7443996a58bf5b9de6cb2, entries=150, sequenceid=311, filesize=12.0 K 2024-12-09T17:23:30,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/649dcdb190294c5dba0dd88cc7ed1f08 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/649dcdb190294c5dba0dd88cc7ed1f08 2024-12-09T17:23:30,351 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/649dcdb190294c5dba0dd88cc7ed1f08, entries=150, sequenceid=311, filesize=12.0 K 2024-12-09T17:23:30,351 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for a1b1655d3c492fdd18da414b0bd9edbd in 461ms, sequenceid=311, compaction requested=false 2024-12-09T17:23:30,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:30,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:30,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-09T17:23:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-09T17:23:30,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-09T17:23:30,354 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2230 sec 2024-12-09T17:23:30,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.2260 sec 2024-12-09T17:23:30,392 DEBUG [Thread-1953 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dee2855 to 127.0.0.1:54326 2024-12-09T17:23:30,392 DEBUG [Thread-1953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,393 DEBUG [Thread-1951 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0657e1bf to 127.0.0.1:54326 2024-12-09T17:23:30,393 DEBUG [Thread-1951 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,393 DEBUG [Thread-1949 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10bda459 to 127.0.0.1:54326 2024-12-09T17:23:30,393 DEBUG [Thread-1949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,393 DEBUG [Thread-1947 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x402e5def to 127.0.0.1:54326 2024-12-09T17:23:30,393 DEBUG [Thread-1947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,394 DEBUG [Thread-1955 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54e8a98a to 127.0.0.1:54326 
2024-12-09T17:23:30,394 DEBUG [Thread-1955 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,746 DEBUG [Thread-1942 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x134bfe32 to 127.0.0.1:54326 2024-12-09T17:23:30,746 DEBUG [Thread-1942 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,780 DEBUG [Thread-1944 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b55f2f to 127.0.0.1:54326 2024-12-09T17:23:30,780 DEBUG [Thread-1944 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,794 DEBUG [Thread-1938 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x315a23ef to 127.0.0.1:54326 2024-12-09T17:23:30,794 DEBUG [Thread-1938 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,807 DEBUG [Thread-1940 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d125972 to 127.0.0.1:54326 2024-12-09T17:23:30,807 DEBUG [Thread-1940 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:30,808 DEBUG [Thread-1936 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x345fa4f7 to 127.0.0.1:54326 2024-12-09T17:23:30,808 DEBUG [Thread-1936 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:31,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-09T17:23:31,233 INFO [Thread-1946 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3364 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10092 rows 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3387 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10161 rows 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3379 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10137 rows 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3396 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10188 rows 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3375 2024-12-09T17:23:31,234 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10125 rows 2024-12-09T17:23:31,235 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 
2024-12-09T17:23:31,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048087da to 127.0.0.1:54326 2024-12-09T17:23:31,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:31,240 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-09T17:23:31,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-09T17:23:31,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:31,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:31,244 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765011244"}]},"ts":"1733765011244"} 2024-12-09T17:23:31,245 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-09T17:23:31,285 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-09T17:23:31,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:23:31,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, UNASSIGN}] 2024-12-09T17:23:31,290 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, UNASSIGN 2024-12-09T17:23:31,291 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=a1b1655d3c492fdd18da414b0bd9edbd, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:31,292 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:23:31,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:31,443 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:31,444 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 
2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing a1b1655d3c492fdd18da414b0bd9edbd, disabling compactions & flushes 2024-12-09T17:23:31,444 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. after waiting 0 ms 2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:31,444 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing a1b1655d3c492fdd18da414b0bd9edbd 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-09T17:23:31,444 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=A 2024-12-09T17:23:31,445 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:31,445 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=B 2024-12-09T17:23:31,445 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:31,445 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a1b1655d3c492fdd18da414b0bd9edbd, store=C 2024-12-09T17:23:31,445 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:31,449 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/fdcd903443e544bf8b3e9bd24b2d7e27 is 50, key is test_row_0/A:col10/1733765010805/Put/seqid=0 2024-12-09T17:23:31,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742326_1502 (size=12301) 2024-12-09T17:23:31,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:31,847 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:31,855 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/fdcd903443e544bf8b3e9bd24b2d7e27 2024-12-09T17:23:31,865 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/c0feacadbc194b3bba40418e840ca552 is 50, key is test_row_0/B:col10/1733765010805/Put/seqid=0 2024-12-09T17:23:31,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742327_1503 (size=12301) 2024-12-09T17:23:32,272 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/c0feacadbc194b3bba40418e840ca552 2024-12-09T17:23:32,315 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/be14b2b6269f467999db28b1a1b905dd is 50, key is test_row_0/C:col10/1733765010805/Put/seqid=0 2024-12-09T17:23:32,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742328_1504 (size=12301) 2024-12-09T17:23:32,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:32,723 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/be14b2b6269f467999db28b1a1b905dd 2024-12-09T17:23:32,731 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/A/fdcd903443e544bf8b3e9bd24b2d7e27 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/fdcd903443e544bf8b3e9bd24b2d7e27 2024-12-09T17:23:32,736 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/fdcd903443e544bf8b3e9bd24b2d7e27, entries=150, sequenceid=321, filesize=12.0 K 2024-12-09T17:23:32,737 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/B/c0feacadbc194b3bba40418e840ca552 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/c0feacadbc194b3bba40418e840ca552 2024-12-09T17:23:32,740 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/c0feacadbc194b3bba40418e840ca552, entries=150, sequenceid=321, filesize=12.0 K 2024-12-09T17:23:32,741 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/.tmp/C/be14b2b6269f467999db28b1a1b905dd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/be14b2b6269f467999db28b1a1b905dd 2024-12-09T17:23:32,744 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/be14b2b6269f467999db28b1a1b905dd, entries=150, sequenceid=321, filesize=12.0 K 2024-12-09T17:23:32,745 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a1b1655d3c492fdd18da414b0bd9edbd in 1300ms, sequenceid=321, compaction requested=true 2024-12-09T17:23:32,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1d6d09647cd64de081e014de10ae0d34, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/33a5c88ed64c44dcaad9db5857a3e9d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bc9abb1fa251464a9e288a924d2fe87c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/0afd9d7681c74b4a8f323e0f236a5097, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/451ec08f70fa4f549b3c4371c714fe83, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/cc407b35fa61453ebd130e74960175cf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e] to archive 2024-12-09T17:23:32,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:23:32,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/d999d8cb1a584c40bfad61ca232e1141 2024-12-09T17:23:32,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/5de2ca174d4242bda0805d9dcd5a962a 2024-12-09T17:23:32,750 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/955217a320ad43d08dda0dd78f322fc9 2024-12-09T17:23:32,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1d6d09647cd64de081e014de10ae0d34 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1d6d09647cd64de081e014de10ae0d34 2024-12-09T17:23:32,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/1fa2460493ad4facb8bd2a5355ee69ce 2024-12-09T17:23:32,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9b6e859b14b6497ca534d5e9fbacbc41 2024-12-09T17:23:32,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/33a5c88ed64c44dcaad9db5857a3e9d2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/33a5c88ed64c44dcaad9db5857a3e9d2 2024-12-09T17:23:32,757 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bbc03db6d14d493d9ab250b869f67bfd 2024-12-09T17:23:32,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/087d0818d8ba4569b81ffb02cda83014 2024-12-09T17:23:32,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bc9abb1fa251464a9e288a924d2fe87c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/bc9abb1fa251464a9e288a924d2fe87c 2024-12-09T17:23:32,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/f365f92e925648d1a486223acd447117 2024-12-09T17:23:32,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/573550c320ed474dbb86dbab848569d4 2024-12-09T17:23:32,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/0afd9d7681c74b4a8f323e0f236a5097 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/0afd9d7681c74b4a8f323e0f236a5097 2024-12-09T17:23:32,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/32a22ce842824483b7b59ec48b0c8ad8 2024-12-09T17:23:32,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/451ec08f70fa4f549b3c4371c714fe83 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/451ec08f70fa4f549b3c4371c714fe83 2024-12-09T17:23:32,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9d419b0a91164ec9b4a901c306eb0cf8 2024-12-09T17:23:32,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/9ead835300c547d5bb5640376043fbd7 2024-12-09T17:23:32,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/e3c7c1b89fef43c7886d8fbc6eb65f3b 2024-12-09T17:23:32,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/cc407b35fa61453ebd130e74960175cf to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/cc407b35fa61453ebd130e74960175cf 2024-12-09T17:23:32,769 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/008abfed02d2462f95a047c66efaf762 2024-12-09T17:23:32,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/dfedb235d95147e5baaf50875543797e 2024-12-09T17:23:32,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5021e07632d14e5b84b0dbc1f7d5614a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/8c2fbdbc69924def85f175b37065410e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/4939c9b7295d4341b3d211e03d7cbf10, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/536c2154033b4df9a403adb7471973b6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/b4ed99077d414fc8997e9d62fa18976b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283] to archive 2024-12-09T17:23:32,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:23:32,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5bca55b135764110a69ea5a1bf02624f 2024-12-09T17:23:32,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9a41f8f1c655418a9ff938c5909e71c9 2024-12-09T17:23:32,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f6ae036b7a3c4b28bf4aa254b8bbd2cd 2024-12-09T17:23:32,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/117cf219c9134019a8f8be3d54249da5 2024-12-09T17:23:32,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/9929bb28fb5245cbbbd017360d07f8d8 2024-12-09T17:23:32,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5021e07632d14e5b84b0dbc1f7d5614a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/5021e07632d14e5b84b0dbc1f7d5614a 2024-12-09T17:23:32,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/657c49180c6a4e468851d8504bc62676 2024-12-09T17:23:32,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/13aad20a0698444e973fd5c553a20ef2 2024-12-09T17:23:32,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/8c2fbdbc69924def85f175b37065410e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/8c2fbdbc69924def85f175b37065410e 2024-12-09T17:23:32,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/004545bffafe4e8cb62e8375936fed95 2024-12-09T17:23:32,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/569e6b61f2d343288ca549f40974877d 2024-12-09T17:23:32,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/4939c9b7295d4341b3d211e03d7cbf10 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/4939c9b7295d4341b3d211e03d7cbf10 2024-12-09T17:23:32,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/f8517ca746ec4bc5a8760b529a725ca8 2024-12-09T17:23:32,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/555869ff0d7b488b8d2c544039578c8b 2024-12-09T17:23:32,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/536c2154033b4df9a403adb7471973b6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/536c2154033b4df9a403adb7471973b6 2024-12-09T17:23:32,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7e4361af905d4e2f837d59728013a3a9 2024-12-09T17:23:32,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/67fa906ee98b4c0c965cbe1b43c8f0cc 2024-12-09T17:23:32,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/b4ed99077d414fc8997e9d62fa18976b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/b4ed99077d414fc8997e9d62fa18976b 2024-12-09T17:23:32,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/e1e4cc59512445969ff2b379d8c0f5ea 2024-12-09T17:23:32,783 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/fc5dc2c0de2c4144a3bf1b38badbc4ee 2024-12-09T17:23:32,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/6d6275c096d84b3dbe71aded995ba283 2024-12-09T17:23:32,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/80e05f3bcdee4696bbba63785aebcadb, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/7ac79cf2d6344d78865942dd1df4d463, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2f02e40eeee7491d83df80bfa24f736f, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/336d18bf9a8e45b3978cb9401fb8920a, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/deca93766062472db653ddcad28d732e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/9fb03b388d25433e94d88a0a99031bef, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c] to archive 2024-12-09T17:23:32,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:23:32,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/6eb81226d5e144d791e8ae5548685a75 2024-12-09T17:23:32,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/5ce6e459fbbd4daf8f8debfc3e7d9cc5 2024-12-09T17:23:32,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/80e05f3bcdee4696bbba63785aebcadb to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/80e05f3bcdee4696bbba63785aebcadb 2024-12-09T17:23:32,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e000c6dbec62473f89e3b0aacb2667e8 2024-12-09T17:23:32,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/eefb8649e78f46feaf6c1c07be124bf9 2024-12-09T17:23:32,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/7ac79cf2d6344d78865942dd1df4d463 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/7ac79cf2d6344d78865942dd1df4d463 2024-12-09T17:23:32,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/8844fb6532324710a509d9f737865f85 2024-12-09T17:23:32,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a9973cba35514fdc8540c2f4393576b6 2024-12-09T17:23:32,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2f02e40eeee7491d83df80bfa24f736f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2f02e40eeee7491d83df80bfa24f736f 2024-12-09T17:23:32,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/3c33f976a4444640b5d1894ef4cd29f8 2024-12-09T17:23:32,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/cfd89e8128a54382b9ba60ff917fa46c 2024-12-09T17:23:32,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/336d18bf9a8e45b3978cb9401fb8920a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/336d18bf9a8e45b3978cb9401fb8920a 2024-12-09T17:23:32,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/a8c3746177994393a07391a4099bd1bc 2024-12-09T17:23:32,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/e231eef083e747fface2285bc058e5bc 2024-12-09T17:23:32,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/deca93766062472db653ddcad28d732e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/deca93766062472db653ddcad28d732e 2024-12-09T17:23:32,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/633d9d9cbe7e49bd9fa8269dfb1fbb1a 2024-12-09T17:23:32,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/2eef813344224b63ada69d34342f6c06 2024-12-09T17:23:32,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/9fb03b388d25433e94d88a0a99031bef to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/9fb03b388d25433e94d88a0a99031bef 2024-12-09T17:23:32,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/ca8842180b484dd981be357d580def91 2024-12-09T17:23:32,798 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/125e8802209244edbaf2e5c1d39e555b 2024-12-09T17:23:32,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/27578290858f4975a7dfe91fcf82ce8c 2024-12-09T17:23:32,802 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/recovered.edits/324.seqid, newMaxSeqId=324, maxSeqId=1 2024-12-09T17:23:32,803 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd. 2024-12-09T17:23:32,803 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for a1b1655d3c492fdd18da414b0bd9edbd: 2024-12-09T17:23:32,804 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:32,805 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=a1b1655d3c492fdd18da414b0bd9edbd, regionState=CLOSED 2024-12-09T17:23:32,807 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-09T17:23:32,807 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure a1b1655d3c492fdd18da414b0bd9edbd, server=80c69eb3c456,42927,1733764865379 in 1.5130 sec 2024-12-09T17:23:32,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-09T17:23:32,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a1b1655d3c492fdd18da414b0bd9edbd, UNASSIGN in 1.5190 sec 2024-12-09T17:23:32,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-09T17:23:32,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5220 sec 2024-12-09T17:23:32,811 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765012810"}]},"ts":"1733765012810"} 
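The store-close sequence above hands every compacted HFile of families B and C to HFileArchiver, which moves each file from the region's data/ path to the mirrored path under archive/ instead of deleting it. A minimal sketch, using the Hadoop FileSystem API, of listing what ends up under the archive path named in the log; the class name, variable names, and the hard-coded NameNode address are illustrative and taken from the log, not from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.net.URI;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        // NameNode address and archived-region path as they appear in the log above (illustrative).
        URI hdfsUri = URI.create("hdfs://localhost:42193");
        Path archivedRegion = new Path(
            "/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4"
            + "/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd");

        try (FileSystem fs = FileSystem.get(hdfsUri, new Configuration())) {
          // Each column family (A, B, C) keeps its own directory under the archived region.
          for (FileStatus family : fs.listStatus(archivedRegion)) {
            for (FileStatus hfile : fs.listStatus(family.getPath())) {
              System.out.println(hfile.getPath() + " (" + hfile.getLen() + " bytes)");
            }
          }
        }
      }
    }
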
2024-12-09T17:23:32,811 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:23:32,850 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:23:32,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6110 sec 2024-12-09T17:23:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-09T17:23:33,349 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-09T17:23:33,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:23:33,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-09T17:23:33,353 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:33,355 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/recovered.edits] 2024-12-09T17:23:33,357 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/3b55bc3121b141649d051a08ee23d98f to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/3b55bc3121b141649d051a08ee23d98f 2024-12-09T17:23:33,358 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/a0b63150d0bc4abdb2fba5094665de1c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/a0b63150d0bc4abdb2fba5094665de1c 2024-12-09T17:23:33,358 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/fdcd903443e544bf8b3e9bd24b2d7e27 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/A/fdcd903443e544bf8b3e9bd24b2d7e27 2024-12-09T17:23:33,360 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7fa48ade8ab7443996a58bf5b9de6cb2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/7fa48ade8ab7443996a58bf5b9de6cb2 2024-12-09T17:23:33,361 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/c0feacadbc194b3bba40418e840ca552 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/c0feacadbc194b3bba40418e840ca552 2024-12-09T17:23:33,362 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/eb86879db05d4d3eae4fb40dee5ddd52 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/B/eb86879db05d4d3eae4fb40dee5ddd52 2024-12-09T17:23:33,363 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/649dcdb190294c5dba0dd88cc7ed1f08 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/649dcdb190294c5dba0dd88cc7ed1f08 2024-12-09T17:23:33,364 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/66681efd332849bc9cd6dd1279cc71f0 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/66681efd332849bc9cd6dd1279cc71f0 2024-12-09T17:23:33,365 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/be14b2b6269f467999db28b1a1b905dd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/C/be14b2b6269f467999db28b1a1b905dd 2024-12-09T17:23:33,367 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/recovered.edits/324.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd/recovered.edits/324.seqid 2024-12-09T17:23:33,367 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/a1b1655d3c492fdd18da414b0bd9edbd 2024-12-09T17:23:33,367 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:23:33,368 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,369 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:23:33,371 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:23:33,371 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,371 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-09T17:23:33,371 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733765013371"}]},"ts":"9223372036854775807"} 2024-12-09T17:23:33,372 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:23:33,372 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a1b1655d3c492fdd18da414b0bd9edbd, NAME => 'TestAcidGuarantees,,1733764988127.a1b1655d3c492fdd18da414b0bd9edbd.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:23:33,372 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-09T17:23:33,373 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733765013373"}]},"ts":"9223372036854775807"} 2024-12-09T17:23:33,374 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:23:33,418 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 69 msec 2024-12-09T17:23:33,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-09T17:23:33,453 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-09T17:23:33,466 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=239 (was 237) - Thread LEAK? -, OpenFileDescriptor=451 (was 452), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=325 (was 294) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4203 (was 4225) 2024-12-09T17:23:33,474 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=325, ProcessCount=11, AvailableMemoryMB=4203 2024-12-09T17:23:33,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-09T17:23:33,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:23:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:33,477 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T17:23:33,477 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:33,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-12-09T17:23:33,478 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T17:23:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742329_1505 (size=963) 2024-12-09T17:23:33,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:33,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:33,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
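The CREATE request logged at 17:23:33,475 carries the full table descriptor: the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three column families A, B and C, each with VERSIONS => '1' and a ROW bloom filter. A minimal sketch of building an equivalent descriptor with the HBase 2.x Admin API; this is a reconstruction from the logged descriptor, not code taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata shown in the log: use the adaptive compacting memstore.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");

        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .build());
        }

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(table.build());
        }
      }
    }
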
2024-12-09T17:23:33,889 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4 2024-12-09T17:23:33,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742330_1506 (size=53) 2024-12-09T17:23:34,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:34,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:23:34,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 066f9d7bacdaa47fdbd6944bdacfa683, disabling compactions & flushes 2024-12-09T17:23:34,299 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:34,299 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:34,300 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. after waiting 0 ms 2024-12-09T17:23:34,300 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:34,300 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:34,300 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:34,302 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T17:23:34,302 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733765014302"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733765014302"}]},"ts":"1733765014302"} 2024-12-09T17:23:34,304 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-09T17:23:34,305 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T17:23:34,305 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765014305"}]},"ts":"1733765014305"} 2024-12-09T17:23:34,307 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-09T17:23:34,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, ASSIGN}] 2024-12-09T17:23:34,379 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, ASSIGN 2024-12-09T17:23:34,380 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, ASSIGN; state=OFFLINE, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=false 2024-12-09T17:23:34,531 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:34,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:34,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:34,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:34,693 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:34,693 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:23:34,694 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,694 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:23:34,694 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,694 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,697 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,699 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:34,699 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName A 2024-12-09T17:23:34,699 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:34,700 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:34,700 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,701 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:34,701 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName B 2024-12-09T17:23:34,701 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:34,701 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:34,701 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,702 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:34,702 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName C 2024-12-09T17:23:34,702 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:34,703 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:34,703 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:34,703 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,704 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,705 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:23:34,706 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:34,708 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T17:23:34,708 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 066f9d7bacdaa47fdbd6944bdacfa683; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74556512, jitterRate=0.11097860336303711}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:23:34,709 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:34,709 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., pid=151, masterSystemTime=1733765014686 2024-12-09T17:23:34,711 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:34,711 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:34,712 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=OPEN, openSeqNum=2, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:34,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-09T17:23:34,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 in 180 msec 2024-12-09T17:23:34,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-09T17:23:34,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, ASSIGN in 337 msec 2024-12-09T17:23:34,717 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T17:23:34,717 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765014717"}]},"ts":"1733765014717"} 2024-12-09T17:23:34,718 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-09T17:23:34,727 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T17:23:34,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2510 sec 2024-12-09T17:23:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-09T17:23:35,587 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-12-09T17:23:35,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19952f0c to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@151bac0d 2024-12-09T17:23:35,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a1fe6e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:35,658 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:35,660 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:35,662 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T17:23:35,664 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60238, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T17:23:35,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-09T17:23:35,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T17:23:35,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-09T17:23:35,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742331_1507 (size=999) 2024-12-09T17:23:36,083 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-09T17:23:36,083 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-09T17:23:36,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-09T17:23:36,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, REOPEN/MOVE}] 2024-12-09T17:23:36,091 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, REOPEN/MOVE 2024-12-09T17:23:36,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,093 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:23:36,093 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:36,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,245 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,245 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:23:36,246 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 066f9d7bacdaa47fdbd6944bdacfa683, disabling compactions & flushes 2024-12-09T17:23:36,246 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,246 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,246 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. after waiting 0 ms 2024-12-09T17:23:36,246 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:36,253 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T17:23:36,254 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,254 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:36,255 WARN [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: 066f9d7bacdaa47fdbd6944bdacfa683 to self. 2024-12-09T17:23:36,256 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,257 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=CLOSED 2024-12-09T17:23:36,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-09T17:23:36,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 in 165 msec 2024-12-09T17:23:36,260 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, REOPEN/MOVE; state=CLOSED, location=80c69eb3c456,42927,1733764865379; forceNewPlan=false, retain=true 2024-12-09T17:23:36,411 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=OPENING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:36,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,573 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:36,573 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} 2024-12-09T17:23:36,574 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,574 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T17:23:36,574 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,574 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,577 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,578 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:36,578 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName A 2024-12-09T17:23:36,580 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:36,580 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:36,580 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,580 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:36,581 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName B 2024-12-09T17:23:36,581 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:36,581 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:36,581 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,581 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-09T17:23:36,581 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 066f9d7bacdaa47fdbd6944bdacfa683 columnFamilyName C 2024-12-09T17:23:36,581 DEBUG [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:36,582 INFO [StoreOpener-066f9d7bacdaa47fdbd6944bdacfa683-1 {}] regionserver.HStore(327): Store=066f9d7bacdaa47fdbd6944bdacfa683/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T17:23:36,582 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,582 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,583 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,584 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T17:23:36,585 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,585 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened 066f9d7bacdaa47fdbd6944bdacfa683; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68682395, jitterRate=0.023447439074516296}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T17:23:36,586 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:36,586 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., pid=156, masterSystemTime=1733765016566 2024-12-09T17:23:36,587 DEBUG [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,587 INFO [RS_OPEN_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:36,587 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=OPEN, openSeqNum=5, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-12-09T17:23:36,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 in 174 msec 2024-12-09T17:23:36,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-09T17:23:36,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, REOPEN/MOVE in 499 msec 2024-12-09T17:23:36,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-09T17:23:36,591 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 502 msec 2024-12-09T17:23:36,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 924 msec 2024-12-09T17:23:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-09T17:23:36,593 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ba132a to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2089b1f4 2024-12-09T17:23:36,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55544bc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,636 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x081e0163 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65f51785 2024-12-09T17:23:36,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1208728f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,646 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x71c377ac to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3cc71f2e 2024-12-09T17:23:36,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d0a9e33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,660 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x7d1de3c9 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79a7bd2b 2024-12-09T17:23:36,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40e55f2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,669 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09e22139 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d688bcb 2024-12-09T17:23:36,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@271e8143, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31f7e171 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62b06a95 2024-12-09T17:23:36,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a5ecd59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,686 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c078737 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d02ace0 2024-12-09T17:23:36,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61da8c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,694 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bf8843a to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63054209 2024-12-09T17:23:36,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560a8819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76670256 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fbb1399 2024-12-09T17:23:36,710 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3df30e37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,711 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x36bc3633 to 127.0.0.1:54326 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51fccca6 2024-12-09T17:23:36,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@745bf218, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T17:23:36,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:36,722 DEBUG [hconnection-0x6aa8930-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-09T17:23:36,722 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:36,723 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:36,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:36,723 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,723 DEBUG [hconnection-0x19b53e60-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:36,724 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58404, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,727 DEBUG [hconnection-0x128e2803-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,728 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:36,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-09T17:23:36,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:36,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:36,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:36,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:36,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:36,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:36,731 DEBUG [hconnection-0x176b3278-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,732 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,733 DEBUG [hconnection-0x27dc0d1f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,734 DEBUG [hconnection-0xd0fa45b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,734 DEBUG [hconnection-0x2f5776f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,734 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,734 DEBUG [hconnection-0x5308e864-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,735 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,735 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,735 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,736 DEBUG [hconnection-0x42596d63-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,737 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,739 DEBUG [hconnection-0x1e1b3ff8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T17:23:36,743 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T17:23:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is 
too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765076747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765076747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765076747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765076749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765076749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120951857be877d640689922d90973e63398_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765016725/Put/seqid=0 2024-12-09T17:23:36,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742332_1508 (size=12154) 2024-12-09T17:23:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:36,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765076850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765076851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765076851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765076852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765076859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,874 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:36,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:36,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:36,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:36,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:36,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:37,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:37,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:37,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765077052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765077053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765077053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765077054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765077061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,160 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:37,163 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120951857be877d640689922d90973e63398_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120951857be877d640689922d90973e63398_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:37,163 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/19820fd114854c9b923e5e2b590a1633, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:37,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/19820fd114854c9b923e5e2b590a1633 is 175, key is test_row_0/A:col10/1733765016725/Put/seqid=0 2024-12-09T17:23:37,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742333_1509 (size=30955) 2024-12-09T17:23:37,167 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 
K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/19820fd114854c9b923e5e2b590a1633 2024-12-09T17:23:37,178 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:37,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:37,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:37,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ea891d5e0f244c02a791c42509d0bdfd is 50, key is test_row_0/B:col10/1733765016725/Put/seqid=0 2024-12-09T17:23:37,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742334_1510 (size=12001) 2024-12-09T17:23:37,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:37,331 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:37,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:37,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765077354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765077354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765077357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765077357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765077363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,483 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:37,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:37,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:37,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:37,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ea891d5e0f244c02a791c42509d0bdfd 2024-12-09T17:23:37,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/ca80a36913d9438084eacc72e78faf3c is 50, key is test_row_0/C:col10/1733765016725/Put/seqid=0 2024-12-09T17:23:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742335_1511 (size=12001) 2024-12-09T17:23:37,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/ca80a36913d9438084eacc72e78faf3c 2024-12-09T17:23:37,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/19820fd114854c9b923e5e2b590a1633 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633 2024-12-09T17:23:37,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633, entries=150, sequenceid=16, filesize=30.2 K 2024-12-09T17:23:37,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ea891d5e0f244c02a791c42509d0bdfd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd 2024-12-09T17:23:37,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd, entries=150, sequenceid=16, filesize=11.7 K 2024-12-09T17:23:37,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/ca80a36913d9438084eacc72e78faf3c as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c 
2024-12-09T17:23:37,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c, entries=150, sequenceid=16, filesize=11.7 K 2024-12-09T17:23:37,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 066f9d7bacdaa47fdbd6944bdacfa683 in 904ms, sequenceid=16, compaction requested=false 2024-12-09T17:23:37,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:37,635 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:37,636 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:37,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:37,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209e4664a0dac114bb4a67780f40b28fab6_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765016748/Put/seqid=0 2024-12-09T17:23:37,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742336_1512 (size=12154) 2024-12-09T17:23:37,826 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:37,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:37,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765077863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765077863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765077864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765077864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765077866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765077965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765077966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765077966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:37,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:37,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765077966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:38,057 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209e4664a0dac114bb4a67780f40b28fab6_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209e4664a0dac114bb4a67780f40b28fab6_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:38,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6e4769c81e0a4a26bb6b99f4db961bcd, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:38,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6e4769c81e0a4a26bb6b99f4db961bcd is 175, key is test_row_0/A:col10/1733765016748/Put/seqid=0 2024-12-09T17:23:38,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742337_1513 (size=30955) 2024-12-09T17:23:38,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765078167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765078168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765078168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765078168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,319 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T17:23:38,462 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6e4769c81e0a4a26bb6b99f4db961bcd 2024-12-09T17:23:38,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/91053d5e28084a1d8484e11436388e98 is 50, key is test_row_0/B:col10/1733765016748/Put/seqid=0 2024-12-09T17:23:38,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765078469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742338_1514 (size=12001) 2024-12-09T17:23:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765078471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765078471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765078471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-09T17:23:38,873 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/91053d5e28084a1d8484e11436388e98 2024-12-09T17:23:38,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e20c3cdb4ee54a73bfed81fd2b9ef224 is 50, key is test_row_0/C:col10/1733765016748/Put/seqid=0 2024-12-09T17:23:38,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765078877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742339_1515 (size=12001) 2024-12-09T17:23:38,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765078973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765078974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765078975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:38,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765078977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:39,283 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e20c3cdb4ee54a73bfed81fd2b9ef224 2024-12-09T17:23:39,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6e4769c81e0a4a26bb6b99f4db961bcd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd 2024-12-09T17:23:39,288 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd, entries=150, sequenceid=40, filesize=30.2 K 2024-12-09T17:23:39,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/91053d5e28084a1d8484e11436388e98 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98 2024-12-09T17:23:39,291 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98, entries=150, sequenceid=40, filesize=11.7 K 2024-12-09T17:23:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e20c3cdb4ee54a73bfed81fd2b9ef224 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224 2024-12-09T17:23:39,293 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224, entries=150, sequenceid=40, filesize=11.7 K 2024-12-09T17:23:39,294 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1658ms, sequenceid=40, compaction requested=false 2024-12-09T17:23:39,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:39,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:39,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-09T17:23:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-09T17:23:39,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-09T17:23:39,296 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5720 sec 2024-12-09T17:23:39,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.5750 sec 2024-12-09T17:23:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:39,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:39,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:39,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120966106c0b5ec44843b53b43c44e3fcd90_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765019979/Put/seqid=0 2024-12-09T17:23:39,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742340_1516 (size=14594) 2024-12-09T17:23:39,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765079995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765079997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765079998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765079999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765080099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765080101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765080101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765080101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,303 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765080302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765080303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765080304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:23:40,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765080304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:23:40,392 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T17:23:40,394 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120966106c0b5ec44843b53b43c44e3fcd90_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966106c0b5ec44843b53b43c44e3fcd90_066f9d7bacdaa47fdbd6944bdacfa683
2024-12-09T17:23:40,395 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/a51ab9c3c845410f9b7bd22f5c9e02fd, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683]
2024-12-09T17:23:40,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/a51ab9c3c845410f9b7bd22f5c9e02fd is 175, key is test_row_0/A:col10/1733765019979/Put/seqid=0
2024-12-09T17:23:40,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742341_1517 (size=39549)
2024-12-09T17:23:40,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765080605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765080606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765080606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-09T17:23:40,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765080606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379
2024-12-09T17:23:40,810 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/a51ab9c3c845410f9b7bd22f5c9e02fd
2024-12-09T17:23:40,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2d7bec68ee0d4b5cad62cef3b12e1829 is 50, key is test_row_0/B:col10/1733765019979/Put/seqid=0
2024-12-09T17:23:40,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742342_1518 (size=12001)
2024-12-09T17:23:40,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157
2024-12-09T17:23:40,827 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed
2024-12-09T17:23:40,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-09T17:23:40,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees
2024-12-09T17:23:40,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159
2024-12-09T17:23:40,829 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-09T17:23:40,830 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T17:23:40,830 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T17:23:40,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:40,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765080887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:40,890 DEBUG [Thread-2268 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T17:23:40,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159
2024-12-09T17:23:40,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379
2024-12-09T17:23:40,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160
2024-12-09T17:23:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.
2024-12-09T17:23:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing
2024-12-09T17:23:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.
2024-12-09T17:23:40,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160
java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T17:23:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160
java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:40,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765081107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765081108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:41,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765081108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765081110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-09T17:23:41,133 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-09T17:23:41,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:41,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2d7bec68ee0d4b5cad62cef3b12e1829 2024-12-09T17:23:41,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/cb2cd9bd54ba444b82cfe4e2d3901b59 is 50, key is test_row_0/C:col10/1733765019979/Put/seqid=0 2024-12-09T17:23:41,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742343_1519 (size=12001) 2024-12-09T17:23:41,285 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-09T17:23:41,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:41,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-09T17:23:41,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-09T17:23:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:41,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-09T17:23:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:41,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:41,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/cb2cd9bd54ba444b82cfe4e2d3901b59 2024-12-09T17:23:41,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/a51ab9c3c845410f9b7bd22f5c9e02fd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd 2024-12-09T17:23:41,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd, entries=200, sequenceid=53, filesize=38.6 K 2024-12-09T17:23:41,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2d7bec68ee0d4b5cad62cef3b12e1829 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829 2024-12-09T17:23:41,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829, entries=150, sequenceid=53, 
filesize=11.7 K 2024-12-09T17:23:41,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/cb2cd9bd54ba444b82cfe4e2d3901b59 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59 2024-12-09T17:23:41,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59, entries=150, sequenceid=53, filesize=11.7 K 2024-12-09T17:23:41,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1678ms, sequenceid=53, compaction requested=true 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:41,658 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:41,658 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:41,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:41,658 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:41,658 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 
066f9d7bacdaa47fdbd6944bdacfa683/B is initiating minor compaction (all files) 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/A is initiating minor compaction (all files) 2024-12-09T17:23:41,659 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/B in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,659 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/A in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,659 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.2 K 2024-12-09T17:23:41,659 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=99.1 K 2024-12-09T17:23:41,659 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd] 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ea891d5e0f244c02a791c42509d0bdfd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733765016725 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19820fd114854c9b923e5e2b590a1633, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733765016725 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 91053d5e28084a1d8484e11436388e98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733765016746 2024-12-09T17:23:41,659 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e4769c81e0a4a26bb6b99f4db961bcd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733765016746 2024-12-09T17:23:41,660 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d7bec68ee0d4b5cad62cef3b12e1829, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:41,660 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting a51ab9c3c845410f9b7bd22f5c9e02fd, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:41,663 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:41,664 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209aa4d828dabde4d7d890f0a816e82a1b4_066f9d7bacdaa47fdbd6944bdacfa683 store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:41,665 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#B#compaction#442 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:41,665 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209aa4d828dabde4d7d890f0a816e82a1b4_066f9d7bacdaa47fdbd6944bdacfa683, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:41,665 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209aa4d828dabde4d7d890f0a816e82a1b4_066f9d7bacdaa47fdbd6944bdacfa683 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:41,665 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/d3b025f9f38f47e8ac34dde5322d06cf is 50, key is test_row_0/B:col10/1733765019979/Put/seqid=0 2024-12-09T17:23:41,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742345_1521 (size=12104) 2024-12-09T17:23:41,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742344_1520 (size=4469) 2024-12-09T17:23:41,678 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#A#compaction#441 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:41,678 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/c495ab3ac9c54f04b14d07f17663fe5b is 175, key is test_row_0/A:col10/1733765019979/Put/seqid=0 2024-12-09T17:23:41,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742346_1522 (size=31058) 2024-12-09T17:23:41,741 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:41,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-09T17:23:41,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:41,742 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:41,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:41,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c1be84397f77481985e92dce1c881597_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765019996/Put/seqid=0 2024-12-09T17:23:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742347_1523 (size=12154) 2024-12-09T17:23:41,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-09T17:23:42,074 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/d3b025f9f38f47e8ac34dde5322d06cf as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/d3b025f9f38f47e8ac34dde5322d06cf 2024-12-09T17:23:42,077 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/B of 066f9d7bacdaa47fdbd6944bdacfa683 into d3b025f9f38f47e8ac34dde5322d06cf(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:42,077 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:42,077 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/B, priority=13, startTime=1733765021658; duration=0sec 2024-12-09T17:23:42,077 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:42,077 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:B 2024-12-09T17:23:42,077 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:42,078 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:42,078 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/C is initiating minor compaction (all files) 2024-12-09T17:23:42,078 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/C in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:42,078 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.2 K 2024-12-09T17:23:42,078 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ca80a36913d9438084eacc72e78faf3c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733765016725 2024-12-09T17:23:42,078 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e20c3cdb4ee54a73bfed81fd2b9ef224, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733765016746 2024-12-09T17:23:42,079 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting cb2cd9bd54ba444b82cfe4e2d3901b59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:42,087 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/c495ab3ac9c54f04b14d07f17663fe5b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b 2024-12-09T17:23:42,088 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#C#compaction#444 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:42,089 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/a0d14667d7794e018f92b35836f6a841 is 50, key is test_row_0/C:col10/1733765019979/Put/seqid=0 2024-12-09T17:23:42,090 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/A of 066f9d7bacdaa47fdbd6944bdacfa683 into c495ab3ac9c54f04b14d07f17663fe5b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:42,090 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:42,090 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/A, priority=13, startTime=1733765021658; duration=0sec 2024-12-09T17:23:42,090 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:42,090 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:A 2024-12-09T17:23:42,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742348_1524 (size=12104) 2024-12-09T17:23:42,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:42,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:42,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765082117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765082117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765082118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765082119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:42,152 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209c1be84397f77481985e92dce1c881597_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c1be84397f77481985e92dce1c881597_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:42,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/33317fadadbf49be8412f6f60aaa10d2, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:42,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/33317fadadbf49be8412f6f60aaa10d2 is 175, key is test_row_0/A:col10/1733765019996/Put/seqid=0 2024-12-09T17:23:42,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742349_1525 (size=30955) 2024-12-09T17:23:42,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765082220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765082220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765082221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765082222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765082423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765082424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765082424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765082424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,494 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/a0d14667d7794e018f92b35836f6a841 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/a0d14667d7794e018f92b35836f6a841 2024-12-09T17:23:42,497 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/C of 066f9d7bacdaa47fdbd6944bdacfa683 into a0d14667d7794e018f92b35836f6a841(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:42,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:42,497 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/C, priority=13, startTime=1733765021658; duration=0sec 2024-12-09T17:23:42,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:42,497 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:C 2024-12-09T17:23:42,557 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/33317fadadbf49be8412f6f60aaa10d2 2024-12-09T17:23:42,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/b3e0dd7fc2d840b6a78a9df1762c6eea is 50, key is test_row_0/B:col10/1733765019996/Put/seqid=0 2024-12-09T17:23:42,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742350_1526 (size=12001) 2024-12-09T17:23:42,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765082725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765082726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765082727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:42,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765082727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-09T17:23:42,966 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/b3e0dd7fc2d840b6a78a9df1762c6eea 2024-12-09T17:23:42,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e1bcc86b1e20445cad4d5d1be40625ab is 50, key is test_row_0/C:col10/1733765019996/Put/seqid=0 2024-12-09T17:23:42,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742351_1527 (size=12001) 2024-12-09T17:23:43,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:43,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765083228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:43,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:43,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765083228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:43,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:43,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765083230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:43,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765083232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:43,374 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e1bcc86b1e20445cad4d5d1be40625ab 2024-12-09T17:23:43,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/33317fadadbf49be8412f6f60aaa10d2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2 2024-12-09T17:23:43,379 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2, entries=150, sequenceid=77, filesize=30.2 K 2024-12-09T17:23:43,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/b3e0dd7fc2d840b6a78a9df1762c6eea as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea 2024-12-09T17:23:43,382 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea, entries=150, sequenceid=77, filesize=11.7 K 2024-12-09T17:23:43,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e1bcc86b1e20445cad4d5d1be40625ab as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab 2024-12-09T17:23:43,385 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab, entries=150, sequenceid=77, filesize=11.7 K 2024-12-09T17:23:43,385 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1643ms, sequenceid=77, compaction requested=false 2024-12-09T17:23:43,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:43,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:43,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-09T17:23:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-09T17:23:43,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-09T17:23:43,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5560 sec 2024-12-09T17:23:43,388 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.5590 sec 2024-12-09T17:23:44,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:44,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:44,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ad0c39e6b0d142e6a9964bab7cc1d1e9_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:44,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742352_1528 (size=17034) 2024-12-09T17:23:44,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765084248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765084249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765084249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765084250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765084351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765084351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765084352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765084352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765084553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765084553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765084555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765084555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,644 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:44,646 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ad0c39e6b0d142e6a9964bab7cc1d1e9_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ad0c39e6b0d142e6a9964bab7cc1d1e9_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:44,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d32efabe259a4cc9a2dfdc67b50fc26e, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:44,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d32efabe259a4cc9a2dfdc67b50fc26e is 175, key is test_row_0/A:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742353_1529 (size=48139) 2024-12-09T17:23:44,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765084857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765084857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765084857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765084859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:44,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765084912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:44,914 DEBUG [Thread-2268 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., hostname=80c69eb3c456,42927,1733764865379, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T17:23:44,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-09T17:23:44,933 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-09T17:23:44,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:44,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-09T17:23:44,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:44,935 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:44,935 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:44,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:45,051 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d32efabe259a4cc9a2dfdc67b50fc26e 2024-12-09T17:23:45,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/72d153d53b60466098457f57e0123717 is 50, key is test_row_0/B:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742354_1530 (size=12001) 2024-12-09T17:23:45,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/72d153d53b60466098457f57e0123717 2024-12-09T17:23:45,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/8ed2cf5373e14100bc615ea3b11173e4 is 50, key is test_row_0/C:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742355_1531 (size=12001) 2024-12-09T17:23:45,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-09T17:23:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:45,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:45,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-09T17:23:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:45,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765085361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:45,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765085361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:45,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765085363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:45,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765085363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,391 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-09T17:23:45,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:45,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:45,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:45,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/8ed2cf5373e14100bc615ea3b11173e4 2024-12-09T17:23:45,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d32efabe259a4cc9a2dfdc67b50fc26e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e 2024-12-09T17:23:45,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e, entries=250, sequenceid=94, filesize=47.0 K 2024-12-09T17:23:45,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/72d153d53b60466098457f57e0123717 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717 2024-12-09T17:23:45,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717, entries=150, sequenceid=94, filesize=11.7 K 2024-12-09T17:23:45,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/8ed2cf5373e14100bc615ea3b11173e4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4 2024-12-09T17:23:45,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4, entries=150, sequenceid=94, filesize=11.7 K 2024-12-09T17:23:45,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1241ms, sequenceid=94, compaction requested=true 2024-12-09T17:23:45,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:45,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:45,477 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:45,477 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:45,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110152 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/B is initiating minor compaction (all files) 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/A is initiating minor compaction (all files) 2024-12-09T17:23:45,478 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/B in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,478 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/A in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:45,478 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/d3b025f9f38f47e8ac34dde5322d06cf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.3 K 2024-12-09T17:23:45,478 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=107.6 K 2024-12-09T17:23:45,478 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e] 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting d3b025f9f38f47e8ac34dde5322d06cf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting c495ab3ac9c54f04b14d07f17663fe5b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:45,478 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting b3e0dd7fc2d840b6a78a9df1762c6eea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733765019994 2024-12-09T17:23:45,479 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33317fadadbf49be8412f6f60aaa10d2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733765019994 2024-12-09T17:23:45,479 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 72d153d53b60466098457f57e0123717, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:45,479 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d32efabe259a4cc9a2dfdc67b50fc26e, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:45,484 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#B#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:45,484 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/990d4cbefc5e4727acc8d771be74eb11 is 50, key is test_row_0/B:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:45,485 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:45,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742356_1532 (size=12207) 2024-12-09T17:23:45,488 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209e6f5fab04e12477f9533d7afa7f4cd5a_066f9d7bacdaa47fdbd6944bdacfa683 store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:45,490 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209e6f5fab04e12477f9533d7afa7f4cd5a_066f9d7bacdaa47fdbd6944bdacfa683, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:45,490 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209e6f5fab04e12477f9533d7afa7f4cd5a_066f9d7bacdaa47fdbd6944bdacfa683 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:45,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742357_1533 (size=4469) 2024-12-09T17:23:45,495 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/990d4cbefc5e4727acc8d771be74eb11 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/990d4cbefc5e4727acc8d771be74eb11 2024-12-09T17:23:45,498 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/B of 066f9d7bacdaa47fdbd6944bdacfa683 into 990d4cbefc5e4727acc8d771be74eb11(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:45,498 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:45,498 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/B, priority=13, startTime=1733765025477; duration=0sec 2024-12-09T17:23:45,498 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:45,498 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:B 2024-12-09T17:23:45,498 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:45,499 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:45,499 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/C is initiating minor compaction (all files) 2024-12-09T17:23:45,499 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/C in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,499 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/a0d14667d7794e018f92b35836f6a841, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.3 K 2024-12-09T17:23:45,499 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting a0d14667d7794e018f92b35836f6a841, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733765017860 2024-12-09T17:23:45,500 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e1bcc86b1e20445cad4d5d1be40625ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733765019994 2024-12-09T17:23:45,500 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ed2cf5373e14100bc615ea3b11173e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:45,506 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
066f9d7bacdaa47fdbd6944bdacfa683#C#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:45,506 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/564fca8c67154cd79a4cc92441d28e0e is 50, key is test_row_0/C:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:45,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742358_1534 (size=12207) 2024-12-09T17:23:45,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:45,543 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:45,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:45,544 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:45,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:45,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fdd8ab87f1be43188b40e72cbf4103fd_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765024249/Put/seqid=0 2024-12-09T17:23:45,551 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742359_1535 (size=12154) 2024-12-09T17:23:45,895 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#A#compaction#451 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:45,895 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/8a4bbfc42f294095b9da9d85ec24a058 is 175, key is test_row_0/A:col10/1733765022116/Put/seqid=0 2024-12-09T17:23:45,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742360_1536 (size=31161) 2024-12-09T17:23:45,913 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/564fca8c67154cd79a4cc92441d28e0e as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/564fca8c67154cd79a4cc92441d28e0e 2024-12-09T17:23:45,916 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/C of 066f9d7bacdaa47fdbd6944bdacfa683 into 564fca8c67154cd79a4cc92441d28e0e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:45,916 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:45,916 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/C, priority=13, startTime=1733765025477; duration=0sec 2024-12-09T17:23:45,916 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:45,916 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:C 2024-12-09T17:23:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:45,954 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209fdd8ab87f1be43188b40e72cbf4103fd_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209fdd8ab87f1be43188b40e72cbf4103fd_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:45,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/298e09afa77b40d38ddab39c8c04d6d2, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:45,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/298e09afa77b40d38ddab39c8c04d6d2 is 175, key is test_row_0/A:col10/1733765024249/Put/seqid=0 2024-12-09T17:23:45,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742361_1537 (size=30955) 2024-12-09T17:23:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:46,301 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/8a4bbfc42f294095b9da9d85ec24a058 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058 
2024-12-09T17:23:46,304 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/A of 066f9d7bacdaa47fdbd6944bdacfa683 into 8a4bbfc42f294095b9da9d85ec24a058(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:46,304 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:46,304 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/A, priority=13, startTime=1733765025476; duration=0sec 2024-12-09T17:23:46,304 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:46,304 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:A 2024-12-09T17:23:46,358 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/298e09afa77b40d38ddab39c8c04d6d2 2024-12-09T17:23:46,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 is 50, key is test_row_0/B:col10/1733765024249/Put/seqid=0 2024-12-09T17:23:46,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742362_1538 (size=12001) 2024-12-09T17:23:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:46,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:46,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765086399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765086400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765086402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765086403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765086503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765086503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765086504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765086505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765086705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765086705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765086707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:46,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765086707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:46,765 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 2024-12-09T17:23:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/280bf4d5f6714698af18834d12119eb7 is 50, key is test_row_0/C:col10/1733765024249/Put/seqid=0 2024-12-09T17:23:46,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742363_1539 (size=12001) 2024-12-09T17:23:47,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765087007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765087007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765087008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765087011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:47,174 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/280bf4d5f6714698af18834d12119eb7 2024-12-09T17:23:47,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/298e09afa77b40d38ddab39c8c04d6d2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2 2024-12-09T17:23:47,179 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2, entries=150, sequenceid=117, filesize=30.2 K 2024-12-09T17:23:47,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 2024-12-09T17:23:47,182 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8, entries=150, sequenceid=117, filesize=11.7 K 2024-12-09T17:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/280bf4d5f6714698af18834d12119eb7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7 2024-12-09T17:23:47,185 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7, entries=150, sequenceid=117, filesize=11.7 K 2024-12-09T17:23:47,185 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1641ms, sequenceid=117, compaction requested=false 2024-12-09T17:23:47,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:47,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:47,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-09T17:23:47,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-09T17:23:47,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-09T17:23:47,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2510 sec 2024-12-09T17:23:47,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.2530 sec 2024-12-09T17:23:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:47,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:47,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:47,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120902512969b8ba48538ad6206ff8b2146d_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:47,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742364_1540 (size=12254) 2024-12-09T17:23:47,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765087529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765087529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765087531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765087532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765087632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765087633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765087634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765087635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765087834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765087835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765087837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:47,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765087837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:47,923 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:47,926 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120902512969b8ba48538ad6206ff8b2146d_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120902512969b8ba48538ad6206ff8b2146d_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:47,926 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6d87d07977214b8ab3087479bc174d16, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:47,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6d87d07977214b8ab3087479bc174d16 is 175, key is test_row_0/A:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742365_1541 (size=31055) 2024-12-09T17:23:48,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765088136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765088136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765088140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765088140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,330 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6d87d07977214b8ab3087479bc174d16 2024-12-09T17:23:48,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/8e7efb40c6ab4ade917a884226408f0b is 50, key is test_row_0/B:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:48,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742366_1542 (size=12101) 2024-12-09T17:23:48,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765088640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765088641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765088642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:48,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765088642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:48,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/8e7efb40c6ab4ade917a884226408f0b 2024-12-09T17:23:48,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/dbfe3833638e458c81a6d2dd095c3f85 is 50, key is test_row_0/C:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:48,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742367_1543 (size=12101) 2024-12-09T17:23:49,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-09T17:23:49,039 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-09T17:23:49,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:49,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-09T17:23:49,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:49,040 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:49,041 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:49,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
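
The repeated Mutate rejections above (on connections 172.17.0.2:58404, 58434, 58456 and 58482) are the region server pushing back with RegionTooBusyException while the region is over its 512.0 K memstore limit and the flush catches up. The stock HBase client normally retries these internally; the sketch below only makes that failure mode explicit at the application level. It is an illustrative sketch, not part of TestAcidGuarantees: the table, row, family and qualifier are taken from this log, the backoff values are arbitrary, and only public client API calls are used.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                             // arbitrary starting backoff
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);                               // may surface RegionTooBusyException under memstore pressure
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 10) throw e;                   // give up after a bounded number of attempts
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);   // simple exponential backoff, capped at 5 s
            }
          }
        }
      }
    }
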
2024-12-09T17:23:49,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:49,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/dbfe3833638e458c81a6d2dd095c3f85 2024-12-09T17:23:49,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/6d87d07977214b8ab3087479bc174d16 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16 2024-12-09T17:23:49,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16, entries=150, sequenceid=135, filesize=30.3 K 2024-12-09T17:23:49,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/8e7efb40c6ab4ade917a884226408f0b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b 2024-12-09T17:23:49,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b, entries=150, sequenceid=135, filesize=11.8 K 2024-12-09T17:23:49,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/dbfe3833638e458c81a6d2dd095c3f85 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85 2024-12-09T17:23:49,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85, entries=150, sequenceid=135, filesize=11.8 K 2024-12-09T17:23:49,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1645ms, sequenceid=135, compaction requested=true 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-09T17:23:49,158 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:49,158 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:49,158 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:49,158 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/A is initiating minor compaction (all files) 2024-12-09T17:23:49,159 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/A in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:49,159 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=91.0 K 2024-12-09T17:23:49,159 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
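
For context on the selection logged above (SortedCompactionPolicy sees 3 store files, 0 compacting, 3 eligible; ExploringCompactionPolicy picks all 3, totalling 93171 bytes for A and 36309 bytes for B), minor-compaction policies of this kind keep adding files while each candidate stays within a size ratio of the files already chosen. The sketch below is illustrative only: the 1.2 ratio, the method names and the newest-to-oldest walk are assumptions for the example, not HBase's actual ExploringCompactionPolicy implementation.

    import java.util.ArrayList;
    import java.util.List;

    final class RatioSelectionSketch {
      // Walks the store files from newest to oldest and keeps adding files while
      // each further (older) file is at most `ratio` times the combined size of
      // the files already selected; the first file is always taken.
      static List<Double> select(List<Double> sizesNewestFirstKb, double ratio) {
        List<Double> selected = new ArrayList<>();
        double selectedTotal = 0;
        for (double size : sizesNewestFirstKb) {
          if (selected.isEmpty() || size <= ratio * selectedTotal) {
            selected.add(size);
            selectedTotal += size;
          } else {
            break;                    // an oversized older file stops the run
          }
        }
        return selected;
      }

      public static void main(String[] args) {
        // Approximate A-family flush outputs from this log, newest first: 30.3 K, 30.2 K, 30.4 K.
        // With near-equal sizes all three qualify, matching the "3 files" selection above.
        System.out.println(select(List.of(30.3, 30.2, 30.4), 1.2));
      }
    }
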
2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16] 2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/B is initiating minor compaction (all files) 2024-12-09T17:23:49,159 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/B in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:49,159 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/990d4cbefc5e4727acc8d771be74eb11, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.5 K 2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a4bbfc42f294095b9da9d85ec24a058, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 990d4cbefc5e4727acc8d771be74eb11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:49,159 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 298e09afa77b40d38ddab39c8c04d6d2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733765024247 2024-12-09T17:23:49,160 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 77fdf9dce9a84b8b936bfe3b8c0ca2e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733765024247 2024-12-09T17:23:49,160 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d87d07977214b8ab3087479bc174d16, 
keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:49,160 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e7efb40c6ab4ade917a884226408f0b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:49,165 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:49,165 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#B#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:49,165 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/3b01af86d6ff4ac89ab7535f0814bf2b is 50, key is test_row_0/B:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:49,166 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120971294aee6c7648339b08650399cf947d_066f9d7bacdaa47fdbd6944bdacfa683 store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:49,168 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120971294aee6c7648339b08650399cf947d_066f9d7bacdaa47fdbd6944bdacfa683, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:49,168 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120971294aee6c7648339b08650399cf947d_066f9d7bacdaa47fdbd6944bdacfa683 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:49,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742368_1544 (size=12409) 2024-12-09T17:23:49,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742369_1545 (size=4469) 2024-12-09T17:23:49,192 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-09T17:23:49,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
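
The FlushRegionCallable starting above executes subprocedure pid=164, which the master created for the client-requested table flush pid=163 a little earlier in this log. A minimal sketch of issuing such a flush from a client, using only the public Admin API (the table name comes from the log; configuration discovery is assumed to come from hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master runs a
          // FlushTableProcedure with one FlushRegionProcedure per region, as seen
          // for pid=163/pid=164 in this log, and the call returns once it is done.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
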
2024-12-09T17:23:49,192 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:49,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:49,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209847ae9429c324b5d8e7dc20e20358693_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765027531/Put/seqid=0 2024-12-09T17:23:49,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742370_1546 (size=12304) 2024-12-09T17:23:49,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:49,576 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/3b01af86d6ff4ac89ab7535f0814bf2b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/3b01af86d6ff4ac89ab7535f0814bf2b 2024-12-09T17:23:49,579 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/B of 066f9d7bacdaa47fdbd6944bdacfa683 into 3b01af86d6ff4ac89ab7535f0814bf2b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
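At this point the longCompactions thread has committed the B-family minor compaction: the three files the exploring policy selected (36309 bytes total) were rewritten into a single 12.1 K file. As a rough, hedged illustration of the knobs that drive that selection, the sketch below sets the commonly documented thresholds and asks for a compaction through the Admin API; the property values shown are the usual defaults, not values read from this run, and the server-side policy still decides which files to pick.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionKnobs {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Selection knobs read by the exploring compaction policy; the values
    // shown are the commonly documented defaults, not this test's settings.
    conf.setInt("hbase.hstore.compaction.min", 3);     // need at least 3 eligible files
    conf.setInt("hbase.hstore.compaction.max", 10);    // never pick more than 10 at once
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queue a compaction request for the table; the region server's policy
      // performs the actual file selection, as in the log entries above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```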
2024-12-09T17:23:49,579 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:49,579 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/B, priority=13, startTime=1733765029158; duration=0sec 2024-12-09T17:23:49,579 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:49,579 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:B 2024-12-09T17:23:49,579 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:49,580 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:49,580 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/C is initiating minor compaction (all files) 2024-12-09T17:23:49,580 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/C in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:49,580 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/564fca8c67154cd79a4cc92441d28e0e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.5 K 2024-12-09T17:23:49,580 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 564fca8c67154cd79a4cc92441d28e0e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733765022116 2024-12-09T17:23:49,580 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 280bf4d5f6714698af18834d12119eb7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733765024247 2024-12-09T17:23:49,580 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting dbfe3833638e458c81a6d2dd095c3f85, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:49,585 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
066f9d7bacdaa47fdbd6944bdacfa683#A#compaction#460 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:49,585 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#C#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:49,586 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/8eeac9f9250d42608b3ad7f0d83fea43 is 175, key is test_row_0/A:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:49,586 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/9a17e2e4ce2e495696b46d5034e18de4 is 50, key is test_row_0/C:col10/1733765026370/Put/seqid=0 2024-12-09T17:23:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742372_1548 (size=12409) 2024-12-09T17:23:49,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742371_1547 (size=31363) 2024-12-09T17:23:49,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:49,602 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209847ae9429c324b5d8e7dc20e20358693_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209847ae9429c324b5d8e7dc20e20358693_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:49,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/dab86d0827d248e2ba15d3ef63e62607, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:49,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/dab86d0827d248e2ba15d3ef63e62607 is 175, key is test_row_0/A:col10/1733765027531/Put/seqid=0 2024-12-09T17:23:49,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38771 is added to blk_1073742373_1549 (size=31105) 2024-12-09T17:23:49,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:49,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:49,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:49,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765089652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765089653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765089654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765089654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765089755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765089756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765089756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765089756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765089956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765089958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765089958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:49,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765089959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:49,992 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/9a17e2e4ce2e495696b46d5034e18de4 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9a17e2e4ce2e495696b46d5034e18de4 2024-12-09T17:23:49,992 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/8eeac9f9250d42608b3ad7f0d83fea43 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43 2024-12-09T17:23:49,995 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/C of 066f9d7bacdaa47fdbd6944bdacfa683 into 9a17e2e4ce2e495696b46d5034e18de4(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:49,995 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:49,995 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/C, priority=13, startTime=1733765029158; duration=0sec 2024-12-09T17:23:49,995 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:49,995 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:C 2024-12-09T17:23:49,995 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/A of 066f9d7bacdaa47fdbd6944bdacfa683 into 8eeac9f9250d42608b3ad7f0d83fea43(size=30.6 K), total size for store is 30.6 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:49,995 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:49,995 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/A, priority=13, startTime=1733765029158; duration=0sec 2024-12-09T17:23:49,996 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:49,996 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:A 2024-12-09T17:23:50,006 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/dab86d0827d248e2ba15d3ef63e62607 2024-12-09T17:23:50,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ec6e1ff3752d4210b15da3abb6d56649 is 50, key is test_row_0/B:col10/1733765027531/Put/seqid=0 2024-12-09T17:23:50,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742374_1550 (size=12151) 2024-12-09T17:23:50,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765090260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765090260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765090261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765090263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,429 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ec6e1ff3752d4210b15da3abb6d56649 2024-12-09T17:23:50,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/bdec3f92aec84788bbed642f45247130 is 50, key is test_row_0/C:col10/1733765027531/Put/seqid=0 2024-12-09T17:23:50,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742375_1551 (size=12151) 2024-12-09T17:23:50,441 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/bdec3f92aec84788bbed642f45247130 2024-12-09T17:23:50,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/dab86d0827d248e2ba15d3ef63e62607 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607 2024-12-09T17:23:50,446 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607, entries=150, sequenceid=156, filesize=30.4 K 2024-12-09T17:23:50,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/ec6e1ff3752d4210b15da3abb6d56649 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649 2024-12-09T17:23:50,449 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649, entries=150, sequenceid=156, filesize=11.9 K 2024-12-09T17:23:50,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/bdec3f92aec84788bbed642f45247130 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130 2024-12-09T17:23:50,451 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130, entries=150, sequenceid=156, filesize=11.9 K 2024-12-09T17:23:50,452 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1260ms, sequenceid=156, compaction requested=false 2024-12-09T17:23:50,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:50,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
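The flush that completes here was racing a stream of client mutations, which the surrounding stack traces show being rejected with RegionTooBusyException once the region's memstore crossed its 512.0 K blocking limit. The hedged sketch below shows one way a client might absorb that back-pressure with a simple retry-and-backoff loop; the row, column, value, and retry parameters are illustrative, and the two configuration keys named in the comment are the standard ones whose product sets the blocking limit (their values in this test are not visible in the excerpt).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  // The blocking limit seen in the log (512 K) is approximately
  // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier;
  // the individual values used by this test are not shown in the excerpt.
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (IOException e) {
          // RegionTooBusyException may surface directly or wrapped by the
          // client's own retry machinery, depending on client settings.
          Thread.sleep(backoffMs);
          backoffMs *= 2; // exponential backoff before retrying the put
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}
```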
2024-12-09T17:23:50,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-09T17:23:50,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-09T17:23:50,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-09T17:23:50,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4120 sec 2024-12-09T17:23:50,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.4150 sec 2024-12-09T17:23:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:50,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:50,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:50,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412092aeedd241b064daaba39e670f1204b1e_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742376_1552 (size=12304) 2024-12-09T17:23:50,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765090777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765090777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765090778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765090778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765090881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765090881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765090882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:50,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:50,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765090882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765091083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765091083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765091084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765091084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-09T17:23:51,143 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-09T17:23:51,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:51,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-09T17:23:51,145 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:51,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:51,146 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:51,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:51,198 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:51,201 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412092aeedd241b064daaba39e670f1204b1e_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412092aeedd241b064daaba39e670f1204b1e_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:51,201 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/ab73aae394154a53aca5bb1dbbde36ab, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:51,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/ab73aae394154a53aca5bb1dbbde36ab is 175, key is test_row_0/A:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:51,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742377_1553 (size=31105) 2024-12-09T17:23:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:51,297 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:51,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765091387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765091387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765091388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765091388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:51,449 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:51,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,605 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/ab73aae394154a53aca5bb1dbbde36ab 2024-12-09T17:23:51,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 is 50, key is test_row_0/B:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:51,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742378_1554 (size=12151) 2024-12-09T17:23:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:51,753 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
as already flushing 2024-12-09T17:23:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765091891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765091893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765091894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:51,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765091895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:51,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:51,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:51,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 2024-12-09T17:23:52,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e35a3e9d08e3453b96cfd1406e4f5893 is 50, key is test_row_0/C:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:52,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742379_1555 (size=12151) 2024-12-09T17:23:52,058 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:52,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:52,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:52,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,210 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:52,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:52,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:52,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:52,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:52,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:52,419 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e35a3e9d08e3453b96cfd1406e4f5893 2024-12-09T17:23:52,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/ab73aae394154a53aca5bb1dbbde36ab as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab 2024-12-09T17:23:52,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab, entries=150, sequenceid=175, filesize=30.4 K 2024-12-09T17:23:52,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 2024-12-09T17:23:52,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5, entries=150, sequenceid=175, filesize=11.9 K 2024-12-09T17:23:52,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/e35a3e9d08e3453b96cfd1406e4f5893 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893 2024-12-09T17:23:52,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893, entries=150, sequenceid=175, filesize=11.9 K 2024-12-09T17:23:52,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1667ms, sequenceid=175, compaction requested=true 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:52,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:52,430 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:52,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:52,430 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:52,430 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93573 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/A is initiating minor compaction (all files) 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/B is initiating minor compaction (all files) 2024-12-09T17:23:52,431 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/B in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,431 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/A in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:52,431 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/3b01af86d6ff4ac89ab7535f0814bf2b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.9 K 2024-12-09T17:23:52,431 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=91.4 K 2024-12-09T17:23:52,431 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab] 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b01af86d6ff4ac89ab7535f0814bf2b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8eeac9f9250d42608b3ad7f0d83fea43, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting ec6e1ff3752d4210b15da3abb6d56649, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733765027524 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting dab86d0827d248e2ba15d3ef63e62607, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733765027524 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c3b3b0cdfb4d4bbc9d49520aa12c3ed5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:52,431 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab73aae394154a53aca5bb1dbbde36ab, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:52,435 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:52,436 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#B#compaction#468 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:52,436 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/97e3f57d147a4bc89dfd4167a48041e5 is 50, key is test_row_0/B:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:52,436 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120950eeaf68b7094cd19bca7f77cbf69b37_066f9d7bacdaa47fdbd6944bdacfa683 store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:52,437 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120950eeaf68b7094cd19bca7f77cbf69b37_066f9d7bacdaa47fdbd6944bdacfa683, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:52,438 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120950eeaf68b7094cd19bca7f77cbf69b37_066f9d7bacdaa47fdbd6944bdacfa683 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:52,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742381_1557 (size=12561) 2024-12-09T17:23:52,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742380_1556 (size=4469) 2024-12-09T17:23:52,442 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#A#compaction#469 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:52,442 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/3949e6091220470c8f2c09b7b8375c15 is 175, key is test_row_0/A:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:52,444 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/97e3f57d147a4bc89dfd4167a48041e5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/97e3f57d147a4bc89dfd4167a48041e5 2024-12-09T17:23:52,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742382_1558 (size=31515) 2024-12-09T17:23:52,448 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/B of 066f9d7bacdaa47fdbd6944bdacfa683 into 97e3f57d147a4bc89dfd4167a48041e5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:52,448 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/B, priority=13, startTime=1733765032430; duration=0sec 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:B 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:52,448 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/C is initiating minor compaction (all files) 2024-12-09T17:23:52,449 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/C in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:52,449 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9a17e2e4ce2e495696b46d5034e18de4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=35.9 K 2024-12-09T17:23:52,449 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a17e2e4ce2e495696b46d5034e18de4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733765026370 2024-12-09T17:23:52,449 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting bdec3f92aec84788bbed642f45247130, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733765027524 2024-12-09T17:23:52,449 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting e35a3e9d08e3453b96cfd1406e4f5893, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:52,454 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#C#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:52,454 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/4bbe9ebe538144de992a3654517b2895 is 50, key is test_row_0/C:col10/1733765029650/Put/seqid=0 2024-12-09T17:23:52,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742383_1559 (size=12561) 2024-12-09T17:23:52,459 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/4bbe9ebe538144de992a3654517b2895 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/4bbe9ebe538144de992a3654517b2895 2024-12-09T17:23:52,461 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/C of 066f9d7bacdaa47fdbd6944bdacfa683 into 4bbe9ebe538144de992a3654517b2895(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:52,461 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:52,461 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/C, priority=13, startTime=1733765032430; duration=0sec 2024-12-09T17:23:52,461 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:52,461 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:C 2024-12-09T17:23:52,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:52,515 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:52,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:52,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412095fa116ca07784769bf188aee225fea2e_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765030776/Put/seqid=0 2024-12-09T17:23:52,522 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742384_1560 (size=12304) 2024-12-09T17:23:52,849 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/3949e6091220470c8f2c09b7b8375c15 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15 2024-12-09T17:23:52,852 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/A of 066f9d7bacdaa47fdbd6944bdacfa683 into 3949e6091220470c8f2c09b7b8375c15(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:52,852 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:52,852 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/A, priority=13, startTime=1733765032430; duration=0sec 2024-12-09T17:23:52,852 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:52,852 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:A 2024-12-09T17:23:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:52,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:52,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765092906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765092907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765092907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765092908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:52,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:52,925 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412095fa116ca07784769bf188aee225fea2e_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412095fa116ca07784769bf188aee225fea2e_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:52,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f5e5b6b36ec042e6ac25e20566d3163a, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:52,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f5e5b6b36ec042e6ac25e20566d3163a is 175, key is test_row_0/A:col10/1733765030776/Put/seqid=0 2024-12-09T17:23:52,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742385_1561 (size=31105) 2024-12-09T17:23:53,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765093009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765093009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765093010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765093011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765093211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765093212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765093212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765093212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:53,329 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f5e5b6b36ec042e6ac25e20566d3163a 2024-12-09T17:23:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/459ebf190fc34241b0d92821378f3968 is 50, key is test_row_0/B:col10/1733765030776/Put/seqid=0 2024-12-09T17:23:53,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742386_1562 (size=12151) 2024-12-09T17:23:53,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765093514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765093516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765093516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765093516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:53,737 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/459ebf190fc34241b0d92821378f3968 2024-12-09T17:23:53,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/c37d6f9c2b684407aee5764e399f757d is 50, key is test_row_0/C:col10/1733765030776/Put/seqid=0 2024-12-09T17:23:53,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742387_1563 (size=12151) 2024-12-09T17:23:53,749 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/c37d6f9c2b684407aee5764e399f757d 2024-12-09T17:23:53,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f5e5b6b36ec042e6ac25e20566d3163a as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a 2024-12-09T17:23:53,755 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a, entries=150, sequenceid=197, filesize=30.4 K 2024-12-09T17:23:53,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/459ebf190fc34241b0d92821378f3968 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968 2024-12-09T17:23:53,758 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968, entries=150, sequenceid=197, filesize=11.9 K 2024-12-09T17:23:53,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/c37d6f9c2b684407aee5764e399f757d as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d 2024-12-09T17:23:53,762 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d, entries=150, sequenceid=197, filesize=11.9 K 2024-12-09T17:23:53,763 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1248ms, sequenceid=197, compaction requested=false 2024-12-09T17:23:53,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:53,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:53,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-09T17:23:53,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-09T17:23:53,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-09T17:23:53,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6180 sec 2024-12-09T17:23:53,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.6210 sec 2024-12-09T17:23:54,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:54,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:54,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120904926a62dd534848b46d2de30b4c0101_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742388_1564 (size=12304) 2024-12-09T17:23:54,025 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:54,027 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120904926a62dd534848b46d2de30b4c0101_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120904926a62dd534848b46d2de30b4c0101_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:54,028 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d9c10749f771435ea67e19453f69950b, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d9c10749f771435ea67e19453f69950b is 175, key is test_row_0/A:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742389_1565 (size=31105) 2024-12-09T17:23:54,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765094029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765094030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765094030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765094032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765094133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765094134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765094134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765094135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765094336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765094336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765094336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765094337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,432 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d9c10749f771435ea67e19453f69950b 2024-12-09T17:23:54,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c2f6e78f9664ae7a8e79821c7a4e565 is 50, key is test_row_0/B:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742390_1566 (size=12151) 2024-12-09T17:23:54,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765094640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765094641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765094641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765094641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:54,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c2f6e78f9664ae7a8e79821c7a4e565 2024-12-09T17:23:54,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/88f81b171d39442db1a9615604606612 is 50, key is test_row_0/C:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742391_1567 (size=12151) 2024-12-09T17:23:54,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/88f81b171d39442db1a9615604606612 2024-12-09T17:23:54,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/d9c10749f771435ea67e19453f69950b as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b 2024-12-09T17:23:54,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b, entries=150, sequenceid=215, filesize=30.4 K 2024-12-09T17:23:54,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c2f6e78f9664ae7a8e79821c7a4e565 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565 2024-12-09T17:23:54,858 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565, entries=150, sequenceid=215, filesize=11.9 K 2024-12-09T17:23:54,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/88f81b171d39442db1a9615604606612 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612 2024-12-09T17:23:54,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612, entries=150, sequenceid=215, filesize=11.9 K 2024-12-09T17:23:54,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 066f9d7bacdaa47fdbd6944bdacfa683 in 846ms, sequenceid=215, compaction requested=true 2024-12-09T17:23:54,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:A, priority=-2147483648, current under compaction store size is 1 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:B, priority=-2147483648, current under compaction store size is 2 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 066f9d7bacdaa47fdbd6944bdacfa683:C, priority=-2147483648, current under compaction store size is 3 2024-12-09T17:23:54,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/B is initiating minor compaction (all files) 2024-12-09T17:23:54,864 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/A is initiating minor compaction (all files) 2024-12-09T17:23:54,864 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/B in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:54,864 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/A in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:54,864 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/97e3f57d147a4bc89dfd4167a48041e5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=36.0 K 2024-12-09T17:23:54,865 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=91.5 K 2024-12-09T17:23:54,865 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
files: [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b] 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 97e3f57d147a4bc89dfd4167a48041e5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 459ebf190fc34241b0d92821378f3968, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733765030775 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3949e6091220470c8f2c09b7b8375c15, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c2f6e78f9664ae7a8e79821c7a4e565, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733765032902 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5e5b6b36ec042e6ac25e20566d3163a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733765030775 2024-12-09T17:23:54,865 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9c10749f771435ea67e19453f69950b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733765032902 2024-12-09T17:23:54,870 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#B#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:54,870 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,870 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/e882bf03a66b4557bb54eea5f36124bd is 50, key is test_row_0/B:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,871 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241209323a80a8a2514e679f313c46f8e073e1_066f9d7bacdaa47fdbd6944bdacfa683 store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,873 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241209323a80a8a2514e679f313c46f8e073e1_066f9d7bacdaa47fdbd6944bdacfa683, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,873 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209323a80a8a2514e679f313c46f8e073e1_066f9d7bacdaa47fdbd6944bdacfa683 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742392_1568 (size=12663) 2024-12-09T17:23:54,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742393_1569 (size=4469) 2024-12-09T17:23:54,877 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#A#compaction#478 average throughput is 4.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:54,877 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/7a61afb9e1da4f57bee4d1c90660a869 is 175, key is test_row_0/A:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:54,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742394_1570 (size=31617) 2024-12-09T17:23:54,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:54,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:54,956 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:54,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f804ed357c4b4c8ca744bfa92e556deb_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765034028/Put/seqid=0 2024-12-09T17:23:54,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742395_1571 (size=12304) 2024-12-09T17:23:54,964 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:54,967 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209f804ed357c4b4c8ca744bfa92e556deb_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f804ed357c4b4c8ca744bfa92e556deb_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:54,967 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/b88d1f53d58d46b198e46bc4bceb31f3, store: [table=TestAcidGuarantees family=A 
region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:54,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/b88d1f53d58d46b198e46bc4bceb31f3 is 175, key is test_row_0/A:col10/1733765034028/Put/seqid=0 2024-12-09T17:23:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742396_1572 (size=31105) 2024-12-09T17:23:54,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:54,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765094982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765095085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765095144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765095145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765095146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765095148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-09T17:23:55,250 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-09T17:23:55,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-09T17:23:55,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-09T17:23:55,252 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-09T17:23:55,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:55,252 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T17:23:55,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T17:23:55,278 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/e882bf03a66b4557bb54eea5f36124bd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/e882bf03a66b4557bb54eea5f36124bd 2024-12-09T17:23:55,281 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/B of 066f9d7bacdaa47fdbd6944bdacfa683 into e882bf03a66b4557bb54eea5f36124bd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:55,281 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:55,281 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/B, priority=13, startTime=1733765034864; duration=0sec 2024-12-09T17:23:55,281 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T17:23:55,281 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:B 2024-12-09T17:23:55,281 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T17:23:55,282 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T17:23:55,282 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1540): 066f9d7bacdaa47fdbd6944bdacfa683/C is initiating minor compaction (all files) 2024-12-09T17:23:55,282 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 066f9d7bacdaa47fdbd6944bdacfa683/C in TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:55,282 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/4bbe9ebe538144de992a3654517b2895, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612] into tmpdir=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp, totalSize=36.0 K 2024-12-09T17:23:55,283 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bbe9ebe538144de992a3654517b2895, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733765029650 2024-12-09T17:23:55,283 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting c37d6f9c2b684407aee5764e399f757d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733765030775 2024-12-09T17:23:55,283 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] compactions.Compactor(224): Compacting 88f81b171d39442db1a9615604606612, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733765032902 2024-12-09T17:23:55,287 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 066f9d7bacdaa47fdbd6944bdacfa683#C#compaction#480 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T17:23:55,288 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/aead0d77fa4f411f93c3925c0a0fbaa2 is 50, key is test_row_0/C:col10/1733765032907/Put/seqid=0 2024-12-09T17:23:55,288 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/7a61afb9e1da4f57bee4d1c90660a869 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/7a61afb9e1da4f57bee4d1c90660a869 2024-12-09T17:23:55,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765095288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742397_1573 (size=12663) 2024-12-09T17:23:55,292 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/A of 066f9d7bacdaa47fdbd6944bdacfa683 into 7a61afb9e1da4f57bee4d1c90660a869(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T17:23:55,292 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:55,292 INFO [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/A, priority=13, startTime=1733765034864; duration=0sec 2024-12-09T17:23:55,292 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:55,292 DEBUG [RS:0;80c69eb3c456:42927-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:A 2024-12-09T17:23:55,294 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/aead0d77fa4f411f93c3925c0a0fbaa2 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/aead0d77fa4f411f93c3925c0a0fbaa2 2024-12-09T17:23:55,297 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 066f9d7bacdaa47fdbd6944bdacfa683/C of 066f9d7bacdaa47fdbd6944bdacfa683 into aead0d77fa4f411f93c3925c0a0fbaa2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T17:23:55,297 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:55,297 INFO [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683., storeName=066f9d7bacdaa47fdbd6944bdacfa683/C, priority=13, startTime=1733765034864; duration=0sec 2024-12-09T17:23:55,297 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T17:23:55,297 DEBUG [RS:0;80c69eb3c456:42927-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 066f9d7bacdaa47fdbd6944bdacfa683:C 2024-12-09T17:23:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:55,373 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/b88d1f53d58d46b198e46bc4bceb31f3 2024-12-09T17:23:55,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/abcfcf4c45f84fbdb58217c783cb8964 is 50, key is test_row_0/B:col10/1733765034028/Put/seqid=0 
2024-12-09T17:23:55,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742398_1574 (size=12151) 2024-12-09T17:23:55,403 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,403 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:55,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:55,555 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:55,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:55,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:55,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,556 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765095590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,707 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:55,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:55,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:55,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/abcfcf4c45f84fbdb58217c783cb8964 2024-12-09T17:23:55,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/59602ae0aa5d47db86ab08267f6d01a1 is 50, key is test_row_0/C:col10/1733765034028/Put/seqid=0 2024-12-09T17:23:55,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742399_1575 (size=12151) 2024-12-09T17:23:55,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:55,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:55,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:55,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:55,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:55,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:55,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:56,012 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:56,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:56,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:56,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:56,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:56,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:56,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:56,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:56,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58398 deadline: 1733765096095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:56,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58482 deadline: 1733765096148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:56,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58404 deadline: 1733765096149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:56,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58456 deadline: 1733765096154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-09T17:23:56,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42927 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58434 deadline: 1733765096155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,164 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-09T17:23:56,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:56,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. as already flushing 2024-12-09T17:23:56,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:56,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T17:23:56,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:56,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T17:23:56,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/59602ae0aa5d47db86ab08267f6d01a1 2024-12-09T17:23:56,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/b88d1f53d58d46b198e46bc4bceb31f3 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/b88d1f53d58d46b198e46bc4bceb31f3 2024-12-09T17:23:56,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/b88d1f53d58d46b198e46bc4bceb31f3, entries=150, sequenceid=235, filesize=30.4 K 2024-12-09T17:23:56,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/abcfcf4c45f84fbdb58217c783cb8964 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/abcfcf4c45f84fbdb58217c783cb8964 2024-12-09T17:23:56,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/abcfcf4c45f84fbdb58217c783cb8964, entries=150, sequenceid=235, filesize=11.9 K 2024-12-09T17:23:56,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/59602ae0aa5d47db86ab08267f6d01a1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/59602ae0aa5d47db86ab08267f6d01a1 2024-12-09T17:23:56,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/59602ae0aa5d47db86ab08267f6d01a1, entries=150, sequenceid=235, filesize=11.9 K 2024-12-09T17:23:56,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1254ms, sequenceid=235, compaction requested=false 2024-12-09T17:23:56,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:56,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:56,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42927 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=168 2024-12-09T17:23:56,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:56,348 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:56,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412097830bd44afce48248fd6c363ac5d4212_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765034980/Put/seqid=0 2024-12-09T17:23:56,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:56,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742400_1576 (size=12304) 2024-12-09T17:23:56,721 DEBUG [Thread-2279 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bf8843a to 127.0.0.1:54326 2024-12-09T17:23:56,721 DEBUG [Thread-2279 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:56,721 DEBUG [Thread-2275 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31f7e171 to 127.0.0.1:54326 2024-12-09T17:23:56,721 DEBUG [Thread-2275 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:56,722 DEBUG [Thread-2281 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76670256 to 127.0.0.1:54326 2024-12-09T17:23:56,722 DEBUG [Thread-2281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:56,722 DEBUG [Thread-2283 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x36bc3633 to 127.0.0.1:54326 2024-12-09T17:23:56,722 DEBUG [Thread-2283 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:56,723 DEBUG [Thread-2277 
{}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c078737 to 127.0.0.1:54326 2024-12-09T17:23:56,723 DEBUG [Thread-2277 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:56,764 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412097830bd44afce48248fd6c363ac5d4212_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412097830bd44afce48248fd6c363ac5d4212_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:56,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f36c08dfd3f749379e600837acb701df, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:23:56,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f36c08dfd3f749379e600837acb701df is 175, key is test_row_0/A:col10/1733765034980/Put/seqid=0 2024-12-09T17:23:56,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742401_1577 (size=31105) 2024-12-09T17:23:57,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42927 {}] regionserver.HRegion(8581): Flush requested on 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:57,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
as already flushing 2024-12-09T17:23:57,106 DEBUG [Thread-2268 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x71c377ac to 127.0.0.1:54326 2024-12-09T17:23:57,106 DEBUG [Thread-2268 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:57,171 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f36c08dfd3f749379e600837acb701df 2024-12-09T17:23:57,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/0eb9d30343874f43aaf04f1ea483c9b7 is 50, key is test_row_0/B:col10/1733765034980/Put/seqid=0 2024-12-09T17:23:57,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742402_1578 (size=12151) 2024-12-09T17:23:57,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:57,589 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/0eb9d30343874f43aaf04f1ea483c9b7 2024-12-09T17:23:57,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/6f31ed886077495086f17d88de91d7ff is 50, key is test_row_0/C:col10/1733765034980/Put/seqid=0 2024-12-09T17:23:57,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742403_1579 (size=12151) 2024-12-09T17:23:58,007 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/6f31ed886077495086f17d88de91d7ff 2024-12-09T17:23:58,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/f36c08dfd3f749379e600837acb701df as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f36c08dfd3f749379e600837acb701df 2024-12-09T17:23:58,021 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f36c08dfd3f749379e600837acb701df, entries=150, sequenceid=254, filesize=30.4 K 2024-12-09T17:23:58,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/0eb9d30343874f43aaf04f1ea483c9b7 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/0eb9d30343874f43aaf04f1ea483c9b7 2024-12-09T17:23:58,026 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/0eb9d30343874f43aaf04f1ea483c9b7, entries=150, sequenceid=254, filesize=11.9 K 2024-12-09T17:23:58,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/6f31ed886077495086f17d88de91d7ff as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/6f31ed886077495086f17d88de91d7ff 2024-12-09T17:23:58,031 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/6f31ed886077495086f17d88de91d7ff, entries=150, sequenceid=254, filesize=11.9 K 2024-12-09T17:23:58,032 INFO [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=6.71 KB/6870 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1684ms, sequenceid=254, compaction requested=true 2024-12-09T17:23:58,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:23:58,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:58,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/80c69eb3c456:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-09T17:23:58,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-09T17:23:58,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-09T17:23:58,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7810 sec 2024-12-09T17:23:58,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.7850 sec 2024-12-09T17:23:58,160 DEBUG [Thread-2270 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d1de3c9 to 127.0.0.1:54326 2024-12-09T17:23:58,160 DEBUG [Thread-2270 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:58,161 DEBUG [Thread-2266 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x081e0163 to 127.0.0.1:54326 2024-12-09T17:23:58,161 DEBUG [Thread-2266 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:58,165 DEBUG [Thread-2264 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ba132a to 127.0.0.1:54326 2024-12-09T17:23:58,165 DEBUG [Thread-2264 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:58,167 DEBUG [Thread-2272 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09e22139 to 127.0.0.1:54326 2024-12-09T17:23:58,168 DEBUG [Thread-2272 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:23:59,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-09T17:23:59,358 INFO [Thread-2274 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8668
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8352
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8593
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8629
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8348
2024-12-09T17:23:59,359 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-09T17:23:59,359 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-09T17:23:59,359 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19952f0c to 127.0.0.1:54326
2024-12-09T17:23:59,359 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T17:23:59,361 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-09T17:23:59,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-09T17:23:59,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-09T17:23:59,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169
2024-12-09T17:23:59,367 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765039366"}]},"ts":"1733765039366"}
2024-12-09T17:23:59,368 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-09T17:23:59,409 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-09T17:23:59,411 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-09T17:23:59,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, UNASSIGN}]
2024-12-09T17:23:59,415 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, UNASSIGN
2024-12-09T17:23:59,417 INFO [PEWorker-5
{}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=CLOSING, regionLocation=80c69eb3c456,42927,1733764865379 2024-12-09T17:23:59,419 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T17:23:59,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379}] 2024-12-09T17:23:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-09T17:23:59,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 80c69eb3c456,42927,1733764865379 2024-12-09T17:23:59,572 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:23:59,572 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-09T17:23:59,573 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 066f9d7bacdaa47fdbd6944bdacfa683, disabling compactions & flushes 2024-12-09T17:23:59,573 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:59,573 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 2024-12-09T17:23:59,573 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. after waiting 0 ms 2024-12-09T17:23:59,573 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:23:59,573 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 066f9d7bacdaa47fdbd6944bdacfa683 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-09T17:23:59,573 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=A 2024-12-09T17:23:59,574 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:59,574 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=B 2024-12-09T17:23:59,574 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:59,574 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 066f9d7bacdaa47fdbd6944bdacfa683, store=C 2024-12-09T17:23:59,574 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-09T17:23:59,586 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ce775caedc6c4e2c93934c5d67e0bbf3_066f9d7bacdaa47fdbd6944bdacfa683 is 50, key is test_row_0/A:col10/1733765038166/Put/seqid=0 2024-12-09T17:23:59,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742404_1580 (size=12454) 2024-12-09T17:23:59,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-09T17:23:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-09T17:23:59,991 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T17:23:59,999 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241209ce775caedc6c4e2c93934c5d67e0bbf3_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ce775caedc6c4e2c93934c5d67e0bbf3_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:00,000 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/5f63769820ab40ca834bcceaf6467045, store: [table=TestAcidGuarantees family=A region=066f9d7bacdaa47fdbd6944bdacfa683] 2024-12-09T17:24:00,001 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/5f63769820ab40ca834bcceaf6467045 is 175, key is test_row_0/A:col10/1733765038166/Put/seqid=0 2024-12-09T17:24:00,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742405_1581 (size=31255) 2024-12-09T17:24:00,408 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/5f63769820ab40ca834bcceaf6467045 2024-12-09T17:24:00,421 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c9846e3a65b413b88bd64c2763d0aa1 is 50, key is test_row_0/B:col10/1733765038166/Put/seqid=0 2024-12-09T17:24:00,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742406_1582 (size=12301) 2024-12-09T17:24:00,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-09T17:24:00,826 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c9846e3a65b413b88bd64c2763d0aa1 2024-12-09T17:24:00,838 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/9feb6b49333947e08c44120abf9cf7db is 50, key is test_row_0/C:col10/1733765038166/Put/seqid=0 2024-12-09T17:24:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742407_1583 (size=12301) 2024-12-09T17:24:01,244 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/9feb6b49333947e08c44120abf9cf7db 2024-12-09T17:24:01,277 DEBUG 
[RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/A/5f63769820ab40ca834bcceaf6467045 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/5f63769820ab40ca834bcceaf6467045 2024-12-09T17:24:01,282 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/5f63769820ab40ca834bcceaf6467045, entries=150, sequenceid=262, filesize=30.5 K 2024-12-09T17:24:01,283 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/B/2c9846e3a65b413b88bd64c2763d0aa1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c9846e3a65b413b88bd64c2763d0aa1 2024-12-09T17:24:01,287 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c9846e3a65b413b88bd64c2763d0aa1, entries=150, sequenceid=262, filesize=12.0 K 2024-12-09T17:24:01,287 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/.tmp/C/9feb6b49333947e08c44120abf9cf7db as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9feb6b49333947e08c44120abf9cf7db 2024-12-09T17:24:01,290 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9feb6b49333947e08c44120abf9cf7db, entries=150, sequenceid=262, filesize=12.0 K 2024-12-09T17:24:01,291 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 066f9d7bacdaa47fdbd6944bdacfa683 in 1718ms, sequenceid=262, compaction requested=true 2024-12-09T17:24:01,291 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b] to archive 2024-12-09T17:24:01,292 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:24:01,293 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/19820fd114854c9b923e5e2b590a1633 2024-12-09T17:24:01,294 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6e4769c81e0a4a26bb6b99f4db961bcd 2024-12-09T17:24:01,296 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/a51ab9c3c845410f9b7bd22f5c9e02fd 2024-12-09T17:24:01,297 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/c495ab3ac9c54f04b14d07f17663fe5b 2024-12-09T17:24:01,298 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/33317fadadbf49be8412f6f60aaa10d2 2024-12-09T17:24:01,300 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d32efabe259a4cc9a2dfdc67b50fc26e 2024-12-09T17:24:01,301 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8a4bbfc42f294095b9da9d85ec24a058 2024-12-09T17:24:01,302 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/298e09afa77b40d38ddab39c8c04d6d2 2024-12-09T17:24:01,303 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/8eeac9f9250d42608b3ad7f0d83fea43 2024-12-09T17:24:01,305 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/6d87d07977214b8ab3087479bc174d16 2024-12-09T17:24:01,306 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/dab86d0827d248e2ba15d3ef63e62607 2024-12-09T17:24:01,308 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/3949e6091220470c8f2c09b7b8375c15 2024-12-09T17:24:01,309 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/ab73aae394154a53aca5bb1dbbde36ab 2024-12-09T17:24:01,311 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f5e5b6b36ec042e6ac25e20566d3163a 2024-12-09T17:24:01,312 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/d9c10749f771435ea67e19453f69950b 2024-12-09T17:24:01,314 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/d3b025f9f38f47e8ac34dde5322d06cf, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/990d4cbefc5e4727acc8d771be74eb11, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/3b01af86d6ff4ac89ab7535f0814bf2b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/97e3f57d147a4bc89dfd4167a48041e5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565] to archive 2024-12-09T17:24:01,315 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T17:24:01,317 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ea891d5e0f244c02a791c42509d0bdfd 2024-12-09T17:24:01,318 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/91053d5e28084a1d8484e11436388e98 2024-12-09T17:24:01,319 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/d3b025f9f38f47e8ac34dde5322d06cf to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/d3b025f9f38f47e8ac34dde5322d06cf 2024-12-09T17:24:01,321 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2d7bec68ee0d4b5cad62cef3b12e1829 2024-12-09T17:24:01,322 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/b3e0dd7fc2d840b6a78a9df1762c6eea 2024-12-09T17:24:01,324 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/990d4cbefc5e4727acc8d771be74eb11 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/990d4cbefc5e4727acc8d771be74eb11 2024-12-09T17:24:01,325 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/72d153d53b60466098457f57e0123717 2024-12-09T17:24:01,326 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/77fdf9dce9a84b8b936bfe3b8c0ca2e8 2024-12-09T17:24:01,327 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/3b01af86d6ff4ac89ab7535f0814bf2b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/3b01af86d6ff4ac89ab7535f0814bf2b 2024-12-09T17:24:01,328 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/8e7efb40c6ab4ade917a884226408f0b 2024-12-09T17:24:01,329 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/ec6e1ff3752d4210b15da3abb6d56649 2024-12-09T17:24:01,330 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/97e3f57d147a4bc89dfd4167a48041e5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/97e3f57d147a4bc89dfd4167a48041e5 2024-12-09T17:24:01,331 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/c3b3b0cdfb4d4bbc9d49520aa12c3ed5 2024-12-09T17:24:01,332 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/459ebf190fc34241b0d92821378f3968 2024-12-09T17:24:01,333 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c2f6e78f9664ae7a8e79821c7a4e565 2024-12-09T17:24:01,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/a0d14667d7794e018f92b35836f6a841, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/564fca8c67154cd79a4cc92441d28e0e, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9a17e2e4ce2e495696b46d5034e18de4, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/4bbe9ebe538144de992a3654517b2895, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612] to archive 2024-12-09T17:24:01,334 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T17:24:01,336 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/ca80a36913d9438084eacc72e78faf3c 2024-12-09T17:24:01,336 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e20c3cdb4ee54a73bfed81fd2b9ef224 2024-12-09T17:24:01,339 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/a0d14667d7794e018f92b35836f6a841 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/a0d14667d7794e018f92b35836f6a841 2024-12-09T17:24:01,340 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/cb2cd9bd54ba444b82cfe4e2d3901b59 2024-12-09T17:24:01,340 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e1bcc86b1e20445cad4d5d1be40625ab 2024-12-09T17:24:01,341 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/564fca8c67154cd79a4cc92441d28e0e to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/564fca8c67154cd79a4cc92441d28e0e 2024-12-09T17:24:01,342 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/8ed2cf5373e14100bc615ea3b11173e4 2024-12-09T17:24:01,342 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/280bf4d5f6714698af18834d12119eb7 2024-12-09T17:24:01,343 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9a17e2e4ce2e495696b46d5034e18de4 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9a17e2e4ce2e495696b46d5034e18de4 2024-12-09T17:24:01,344 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/dbfe3833638e458c81a6d2dd095c3f85 2024-12-09T17:24:01,345 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/bdec3f92aec84788bbed642f45247130 2024-12-09T17:24:01,345 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/4bbe9ebe538144de992a3654517b2895 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/4bbe9ebe538144de992a3654517b2895 2024-12-09T17:24:01,346 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/e35a3e9d08e3453b96cfd1406e4f5893 2024-12-09T17:24:01,347 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/c37d6f9c2b684407aee5764e399f757d 2024-12-09T17:24:01,347 DEBUG [StoreCloser-TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/88f81b171d39442db1a9615604606612 2024-12-09T17:24:01,350 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits/265.seqid, newMaxSeqId=265, maxSeqId=4 2024-12-09T17:24:01,350 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683. 
2024-12-09T17:24:01,350 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 066f9d7bacdaa47fdbd6944bdacfa683: 2024-12-09T17:24:01,352 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,352 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=066f9d7bacdaa47fdbd6944bdacfa683, regionState=CLOSED 2024-12-09T17:24:01,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-09T17:24:01,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 066f9d7bacdaa47fdbd6944bdacfa683, server=80c69eb3c456,42927,1733764865379 in 1.9340 sec 2024-12-09T17:24:01,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-09T17:24:01,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=066f9d7bacdaa47fdbd6944bdacfa683, UNASSIGN in 1.9410 sec 2024-12-09T17:24:01,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-09T17:24:01,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9450 sec 2024-12-09T17:24:01,357 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733765041357"}]},"ts":"1733765041357"} 2024-12-09T17:24:01,358 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-09T17:24:01,367 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-09T17:24:01,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0060 sec 2024-12-09T17:24:01,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-09T17:24:01,474 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-09T17:24:01,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-09T17:24:01,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,478 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-09T17:24:01,479 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,484 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,487 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C, FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits] 2024-12-09T17:24:01,491 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/5f63769820ab40ca834bcceaf6467045 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/5f63769820ab40ca834bcceaf6467045 2024-12-09T17:24:01,494 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/7a61afb9e1da4f57bee4d1c90660a869 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/7a61afb9e1da4f57bee4d1c90660a869 2024-12-09T17:24:01,496 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/b88d1f53d58d46b198e46bc4bceb31f3 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/b88d1f53d58d46b198e46bc4bceb31f3 2024-12-09T17:24:01,498 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f36c08dfd3f749379e600837acb701df to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/A/f36c08dfd3f749379e600837acb701df 2024-12-09T17:24:01,501 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/0eb9d30343874f43aaf04f1ea483c9b7 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/0eb9d30343874f43aaf04f1ea483c9b7 
2024-12-09T17:24:01,503 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c9846e3a65b413b88bd64c2763d0aa1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/2c9846e3a65b413b88bd64c2763d0aa1 2024-12-09T17:24:01,505 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/abcfcf4c45f84fbdb58217c783cb8964 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/abcfcf4c45f84fbdb58217c783cb8964 2024-12-09T17:24:01,507 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/e882bf03a66b4557bb54eea5f36124bd to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/B/e882bf03a66b4557bb54eea5f36124bd 2024-12-09T17:24:01,511 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/59602ae0aa5d47db86ab08267f6d01a1 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/59602ae0aa5d47db86ab08267f6d01a1 2024-12-09T17:24:01,513 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/6f31ed886077495086f17d88de91d7ff to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/6f31ed886077495086f17d88de91d7ff 2024-12-09T17:24:01,515 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9feb6b49333947e08c44120abf9cf7db to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/9feb6b49333947e08c44120abf9cf7db 2024-12-09T17:24:01,516 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/aead0d77fa4f411f93c3925c0a0fbaa2 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/C/aead0d77fa4f411f93c3925c0a0fbaa2 2024-12-09T17:24:01,519 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits/265.seqid to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683/recovered.edits/265.seqid 2024-12-09T17:24:01,520 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/default/TestAcidGuarantees/066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,520 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-09T17:24:01,520 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:24:01,521 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-09T17:24:01,523 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120902512969b8ba48538ad6206ff8b2146d_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120902512969b8ba48538ad6206ff8b2146d_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,524 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120904926a62dd534848b46d2de30b4c0101_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120904926a62dd534848b46d2de30b4c0101_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,525 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412092aeedd241b064daaba39e670f1204b1e_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412092aeedd241b064daaba39e670f1204b1e_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,525 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120951857be877d640689922d90973e63398_066f9d7bacdaa47fdbd6944bdacfa683 to 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120951857be877d640689922d90973e63398_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,526 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412095fa116ca07784769bf188aee225fea2e_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412095fa116ca07784769bf188aee225fea2e_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,527 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966106c0b5ec44843b53b43c44e3fcd90_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120966106c0b5ec44843b53b43c44e3fcd90_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,528 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412097830bd44afce48248fd6c363ac5d4212_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412097830bd44afce48248fd6c363ac5d4212_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,529 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209847ae9429c324b5d8e7dc20e20358693_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209847ae9429c324b5d8e7dc20e20358693_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,529 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ad0c39e6b0d142e6a9964bab7cc1d1e9_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ad0c39e6b0d142e6a9964bab7cc1d1e9_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,530 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c1be84397f77481985e92dce1c881597_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209c1be84397f77481985e92dce1c881597_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,531 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ce775caedc6c4e2c93934c5d67e0bbf3_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209ce775caedc6c4e2c93934c5d67e0bbf3_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,531 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209e4664a0dac114bb4a67780f40b28fab6_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209e4664a0dac114bb4a67780f40b28fab6_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,532 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f804ed357c4b4c8ca744bfa92e556deb_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209f804ed357c4b4c8ca744bfa92e556deb_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,533 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209fdd8ab87f1be43188b40e72cbf4103fd_066f9d7bacdaa47fdbd6944bdacfa683 to hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241209fdd8ab87f1be43188b40e72cbf4103fd_066f9d7bacdaa47fdbd6944bdacfa683 2024-12-09T17:24:01,533 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-09T17:24:01,535 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,536 WARN 
[PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-09T17:24:01,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-09T17:24:01,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-09T17:24:01,539 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733765041538"}]},"ts":"9223372036854775807"} 2024-12-09T17:24:01,540 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-09T17:24:01,540 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 066f9d7bacdaa47fdbd6944bdacfa683, NAME => 'TestAcidGuarantees,,1733765013475.066f9d7bacdaa47fdbd6944bdacfa683.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T17:24:01,540 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-09T17:24:01,540 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733765041540"}]},"ts":"9223372036854775807"} 2024-12-09T17:24:01,541 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-09T17:24:01,551 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-09T17:24:01,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 76 msec 2024-12-09T17:24:01,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45541 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-09T17:24:01,581 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-09T17:24:01,594 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=239 (was 239), OpenFileDescriptor=452 (was 451) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=319 (was 325), ProcessCount=11 (was 11), AvailableMemoryMB=4193 (was 4203) 2024-12-09T17:24:01,594 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-09T17:24:01,594 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-09T17:24:01,594 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6169df5c to 127.0.0.1:54326 2024-12-09T17:24:01,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:24:01,594 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T17:24:01,594 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1483939531, stopped=false 2024-12-09T17:24:01,595 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=80c69eb3c456,45541,1733764864652 2024-12-09T17:24:01,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T17:24:01,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T17:24:01,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:24:01,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:24:01,601 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-09T17:24:01,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:24:01,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T17:24:01,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T17:24:01,601 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '80c69eb3c456,42927,1733764865379' ***** 2024-12-09T17:24:01,601 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-09T17:24:01,602 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T17:24:01,602 INFO [RS:0;80c69eb3c456:42927 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T17:24:01,602 INFO [RS:0;80c69eb3c456:42927 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
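For context on the DISABLE (procId 169) and DELETE (procId 173) operations recorded above: a minimal, hypothetical sketch of the HBase Admin calls that would produce those master procedures. The class name and the standalone main() wrapper are assumptions for illustration; only the Admin API calls themselves (disableTable/deleteTable) correspond to the logged operations.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical sketch, not taken from the test source: issuing the table
// disable/delete that the master logs as DisableTableProcedure and
// DeleteTableProcedure for TestAcidGuarantees.
public class DropTestTable {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Submits a DisableTableProcedure; regions are unassigned and marked CLOSED.
        admin.disableTable(table);
      }
      // Submits a DeleteTableProcedure; store files (including MOB files) are moved
      // to the archive directory and the table's rows are removed from hbase:meta.
      admin.deleteTable(table);
    }
  }
}
```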
2024-12-09T17:24:01,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(3579): Received CLOSE for ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1224): stopping server 80c69eb3c456,42927,1733764865379 2024-12-09T17:24:01,603 DEBUG [RS:0;80c69eb3c456:42927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T17:24:01,603 INFO [regionserver/80c69eb3c456:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T17:24:01,603 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ad9a19a7365c7aeecc9593a7078cfd44, disabling compactions & flushes 2024-12-09T17:24:01,604 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:24:01,604 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:24:01,604 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1603): Online Regions={ad9a19a7365c7aeecc9593a7078cfd44=hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. after waiting 0 ms 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 
2024-12-09T17:24:01,604 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ad9a19a7365c7aeecc9593a7078cfd44 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-09T17:24:01,604 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T17:24:01,604 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T17:24:01,604 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-09T17:24:01,604 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:24:01,626 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/.tmp/info/051e0a5a8bd749a998c9c678b8492aaf is 45, key is default/info:d/1733764870233/Put/seqid=0 2024-12-09T17:24:01,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742408_1584 (size=5037) 2024-12-09T17:24:01,630 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/info/31a6c75c29714343a289eb47fe5614c8 is 143, key is hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44./info:regioninfo/1733764870050/Put/seqid=0 2024-12-09T17:24:01,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742409_1585 (size=7725) 2024-12-09T17:24:01,805 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:24:02,005 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ad9a19a7365c7aeecc9593a7078cfd44 2024-12-09T17:24:02,030 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/.tmp/info/051e0a5a8bd749a998c9c678b8492aaf 2024-12-09T17:24:02,034 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/info/31a6c75c29714343a289eb47fe5614c8 2024-12-09T17:24:02,040 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/.tmp/info/051e0a5a8bd749a998c9c678b8492aaf as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/info/051e0a5a8bd749a998c9c678b8492aaf 2024-12-09T17:24:02,044 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/info/051e0a5a8bd749a998c9c678b8492aaf, entries=2, sequenceid=6, filesize=4.9 K 2024-12-09T17:24:02,045 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ad9a19a7365c7aeecc9593a7078cfd44 in 441ms, sequenceid=6, compaction requested=false 2024-12-09T17:24:02,049 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/namespace/ad9a19a7365c7aeecc9593a7078cfd44/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T17:24:02,050 INFO [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 2024-12-09T17:24:02,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ad9a19a7365c7aeecc9593a7078cfd44: 2024-12-09T17:24:02,050 DEBUG [RS_CLOSE_REGION-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733764869171.ad9a19a7365c7aeecc9593a7078cfd44. 
2024-12-09T17:24:02,057 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/rep_barrier/a94ac5f10666445f8feedc8400c6c3fd is 102, key is TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb./rep_barrier:/1733764898170/DeleteFamily/seqid=0 2024-12-09T17:24:02,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742410_1586 (size=6025) 2024-12-09T17:24:02,108 INFO [regionserver/80c69eb3c456:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T17:24:02,109 INFO [regionserver/80c69eb3c456:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T17:24:02,206 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-09T17:24:02,406 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-09T17:24:02,461 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/rep_barrier/a94ac5f10666445f8feedc8400c6c3fd 2024-12-09T17:24:02,483 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/table/3527afc48a984074b7211c1492f194c5 is 96, key is TestAcidGuarantees,,1733764870524.326764652e67b313fc217edc01a9dfcb./table:/1733764898170/DeleteFamily/seqid=0 2024-12-09T17:24:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742411_1587 (size=5942) 2024-12-09T17:24:02,606 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-09T17:24:02,606 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T17:24:02,606 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-09T17:24:02,807 DEBUG [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-09T17:24:02,887 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/table/3527afc48a984074b7211c1492f194c5 2024-12-09T17:24:02,896 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/info/31a6c75c29714343a289eb47fe5614c8 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/info/31a6c75c29714343a289eb47fe5614c8 2024-12-09T17:24:02,903 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/info/31a6c75c29714343a289eb47fe5614c8, entries=22, sequenceid=93, filesize=7.5 K 2024-12-09T17:24:02,904 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/rep_barrier/a94ac5f10666445f8feedc8400c6c3fd as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/rep_barrier/a94ac5f10666445f8feedc8400c6c3fd 2024-12-09T17:24:02,907 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/rep_barrier/a94ac5f10666445f8feedc8400c6c3fd, entries=6, sequenceid=93, filesize=5.9 K 2024-12-09T17:24:02,908 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/.tmp/table/3527afc48a984074b7211c1492f194c5 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/table/3527afc48a984074b7211c1492f194c5 2024-12-09T17:24:02,911 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/table/3527afc48a984074b7211c1492f194c5, entries=9, sequenceid=93, filesize=5.8 K 2024-12-09T17:24:02,912 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1308ms, sequenceid=93, compaction requested=false 2024-12-09T17:24:02,916 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-09T17:24:02,917 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T17:24:02,917 INFO [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-09T17:24:02,917 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-09T17:24:02,917 DEBUG [RS_CLOSE_META-regionserver/80c69eb3c456:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T17:24:03,007 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1250): stopping server 80c69eb3c456,42927,1733764865379; all regions closed. 
2024-12-09T17:24:03,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741834_1010 (size=26050) 2024-12-09T17:24:03,020 DEBUG [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/oldWALs 2024-12-09T17:24:03,020 INFO [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 80c69eb3c456%2C42927%2C1733764865379.meta:.meta(num 1733764868762) 2024-12-09T17:24:03,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741833_1009 (size=12911056) 2024-12-09T17:24:03,024 DEBUG [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/oldWALs 2024-12-09T17:24:03,024 INFO [RS:0;80c69eb3c456:42927 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 80c69eb3c456%2C42927%2C1733764865379:(num 1733764868257) 2024-12-09T17:24:03,024 DEBUG [RS:0;80c69eb3c456:42927 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:24:03,025 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T17:24:03,025 INFO [RS:0;80c69eb3c456:42927 {}] hbase.ChoreService(370): Chore service for: regionserver/80c69eb3c456:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-09T17:24:03,025 INFO [regionserver/80c69eb3c456:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T17:24:03,026 INFO [RS:0;80c69eb3c456:42927 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42927 2024-12-09T17:24:03,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T17:24:03,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/80c69eb3c456,42927,1733764865379 2024-12-09T17:24:03,076 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [80c69eb3c456,42927,1733764865379] 2024-12-09T17:24:03,076 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 80c69eb3c456,42927,1733764865379; numProcessing=1 2024-12-09T17:24:03,084 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/80c69eb3c456,42927,1733764865379 already deleted, retry=false 2024-12-09T17:24:03,084 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 80c69eb3c456,42927,1733764865379 expired; onlineServers=0 2024-12-09T17:24:03,084 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '80c69eb3c456,45541,1733764864652' ***** 2024-12-09T17:24:03,084 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T17:24:03,085 DEBUG [M:0;80c69eb3c456:45541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ccc1daf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=80c69eb3c456/172.17.0.2:0 2024-12-09T17:24:03,085 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegionServer(1224): stopping server 80c69eb3c456,45541,1733764864652 2024-12-09T17:24:03,085 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegionServer(1250): stopping server 80c69eb3c456,45541,1733764864652; all regions closed. 2024-12-09T17:24:03,085 DEBUG [M:0;80c69eb3c456:45541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T17:24:03,085 DEBUG [M:0;80c69eb3c456:45541 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T17:24:03,086 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T17:24:03,086 DEBUG [M:0;80c69eb3c456:45541 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T17:24:03,086 DEBUG [master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.large.0-1733764867912 {}] cleaner.HFileCleaner(306): Exit Thread[master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.large.0-1733764867912,5,FailOnTimeoutGroup] 2024-12-09T17:24:03,086 DEBUG [master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.small.0-1733764867915 {}] cleaner.HFileCleaner(306): Exit Thread[master/80c69eb3c456:0:becomeActiveMaster-HFileCleaner.small.0-1733764867915,5,FailOnTimeoutGroup] 2024-12-09T17:24:03,087 INFO [M:0;80c69eb3c456:45541 {}] hbase.ChoreService(370): Chore service for: master/80c69eb3c456:0 had [] on shutdown 2024-12-09T17:24:03,087 DEBUG [M:0;80c69eb3c456:45541 {}] master.HMaster(1733): Stopping service threads 2024-12-09T17:24:03,087 INFO [M:0;80c69eb3c456:45541 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T17:24:03,088 ERROR [M:0;80c69eb3c456:45541 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (1573750593) connection to localhost/127.0.0.1:42193 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:42193,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-09T17:24:03,089 INFO [M:0;80c69eb3c456:45541 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T17:24:03,089 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T17:24:03,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T17:24:03,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T17:24:03,092 DEBUG [M:0;80c69eb3c456:45541 {}] zookeeper.ZKUtil(347): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T17:24:03,092 WARN [M:0;80c69eb3c456:45541 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T17:24:03,092 INFO [M:0;80c69eb3c456:45541 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-09T17:24:03,092 INFO [M:0;80c69eb3c456:45541 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T17:24:03,092 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T17:24:03,093 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T17:24:03,093 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:24:03,093 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:24:03,093 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T17:24:03,093 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:24:03,093 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=718.66 KB heapSize=879.73 KB 2024-12-09T17:24:03,112 DEBUG [M:0;80c69eb3c456:45541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2611c48b3a0840feac8d8a94da89cb92 is 82, key is hbase:meta,,1/info:regioninfo/1733764868911/Put/seqid=0 2024-12-09T17:24:03,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742412_1588 (size=5672) 2024-12-09T17:24:03,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T17:24:03,176 INFO [RS:0;80c69eb3c456:42927 {}] regionserver.HRegionServer(1307): Exiting; stopping=80c69eb3c456,42927,1733764865379; zookeeper connection closed. 
2024-12-09T17:24:03,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42927-0x1000bcf74250001, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T17:24:03,176 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5a2b915d {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5a2b915d 2024-12-09T17:24:03,177 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T17:24:03,516 INFO [M:0;80c69eb3c456:45541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=1970 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2611c48b3a0840feac8d8a94da89cb92 2024-12-09T17:24:03,542 DEBUG [M:0;80c69eb3c456:45541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/294c8c816ac3445c8099b8e2b0e46d21 is 2283, key is \x00\x00\x00\x00\x00\x00\x00g/proc:d/1733764961241/Put/seqid=0 2024-12-09T17:24:03,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742413_1589 (size=44008) 2024-12-09T17:24:03,858 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T17:24:03,947 INFO [M:0;80c69eb3c456:45541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=718.11 KB at sequenceid=1970 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/294c8c816ac3445c8099b8e2b0e46d21 2024-12-09T17:24:03,955 INFO [M:0;80c69eb3c456:45541 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 294c8c816ac3445c8099b8e2b0e46d21 2024-12-09T17:24:03,973 DEBUG [M:0;80c69eb3c456:45541 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5db428399ede4523a8b3db2f7c9477e1 is 69, key is 80c69eb3c456,42927,1733764865379/rs:state/1733764868046/Put/seqid=0 2024-12-09T17:24:03,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073742414_1590 (size=5156) 2024-12-09T17:24:04,377 INFO [M:0;80c69eb3c456:45541 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=1970 (bloomFilter=true), to=hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5db428399ede4523a8b3db2f7c9477e1 2024-12-09T17:24:04,386 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2611c48b3a0840feac8d8a94da89cb92 as 
hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2611c48b3a0840feac8d8a94da89cb92 2024-12-09T17:24:04,391 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2611c48b3a0840feac8d8a94da89cb92, entries=8, sequenceid=1970, filesize=5.5 K 2024-12-09T17:24:04,392 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/294c8c816ac3445c8099b8e2b0e46d21 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/294c8c816ac3445c8099b8e2b0e46d21 2024-12-09T17:24:04,396 INFO [M:0;80c69eb3c456:45541 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 294c8c816ac3445c8099b8e2b0e46d21 2024-12-09T17:24:04,396 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/294c8c816ac3445c8099b8e2b0e46d21, entries=173, sequenceid=1970, filesize=43.0 K 2024-12-09T17:24:04,397 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5db428399ede4523a8b3db2f7c9477e1 as hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5db428399ede4523a8b3db2f7c9477e1 2024-12-09T17:24:04,401 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42193/user/jenkins/test-data/a9b96857-512d-c527-b8bb-e9dfd5a1f8a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5db428399ede4523a8b3db2f7c9477e1, entries=1, sequenceid=1970, filesize=5.0 K 2024-12-09T17:24:04,402 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(3040): Finished flush of dataSize ~718.66 KB/735910, heapSize ~879.44 KB/900544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1309ms, sequenceid=1970, compaction requested=false 2024-12-09T17:24:04,403 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T17:24:04,403 DEBUG [M:0;80c69eb3c456:45541 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-09T17:24:04,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38771 is added to blk_1073741830_1006 (size=865175) 2024-12-09T17:24:04,405 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-09T17:24:04,405 INFO [M:0;80c69eb3c456:45541 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-09T17:24:04,406 INFO [M:0;80c69eb3c456:45541 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45541 2024-12-09T17:24:04,450 DEBUG [M:0;80c69eb3c456:45541 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/80c69eb3c456,45541,1733764864652 already deleted, retry=false 2024-12-09T17:24:04,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T17:24:04,559 INFO [M:0;80c69eb3c456:45541 {}] regionserver.HRegionServer(1307): Exiting; stopping=80c69eb3c456,45541,1733764864652; zookeeper connection closed. 2024-12-09T17:24:04,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45541-0x1000bcf74250000, quorum=127.0.0.1:54326, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T17:24:04,563 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bd2e890{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T17:24:04,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3fa6ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T17:24:04,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T17:24:04,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63d4d645{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T17:24:04,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57582772{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.log.dir/,STOPPED} 2024-12-09T17:24:04,568 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T17:24:04,568 WARN [BP-1064377711-172.17.0.2-1733764861151 heartbeating to localhost/127.0.0.1:42193 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T17:24:04,568 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T17:24:04,568 WARN [BP-1064377711-172.17.0.2-1733764861151 heartbeating to localhost/127.0.0.1:42193 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1064377711-172.17.0.2-1733764861151 (Datanode Uuid 5956346e-f1fd-43bb-ade0-bbf2c491cc6a) service to localhost/127.0.0.1:42193 2024-12-09T17:24:04,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data1/current/BP-1064377711-172.17.0.2-1733764861151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T17:24:04,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/cluster_4037d738-ebcb-129a-bff6-ec03dcaba43b/dfs/data/data2/current/BP-1064377711-172.17.0.2-1733764861151 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T17:24:04,571 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T17:24:04,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0d4558{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T17:24:04,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a299586{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T17:24:04,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T17:24:04,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@588be694{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T17:24:04,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73882ca4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/30dc87f6-1bff-aa69-52fa-00baca180d50/hadoop.log.dir/,STOPPED} 2024-12-09T17:24:04,593 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-09T17:24:04,699 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
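The final lines ("Shutting down minicluster" through "Minicluster is down") come from the test's teardown. A hedged sketch of what that teardown typically looks like, assuming the standard HBaseTestingUtility API; the class name and the TEST_UTIL field are assumptions for illustration, not the actual test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;

// Hypothetical teardown sketch: stopping the minicluster that produced the
// shutdown messages above (HBase master and region server, then HDFS and ZooKeeper).
public class TeardownExample {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Logs "Shutting down minicluster" and, once every component is stopped,
    // "Minicluster is down".
    TEST_UTIL.shutdownMiniCluster();
  }
}
```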